// Copyright PinaPL
//
// functions.cc
// PinaPL
//
#include <cmath>
#include <string>
#include <eigen3/Eigen/Dense>
#include "functions.hh"

// Standard logistic sigmoid: 1 / (1 + e^(-x)).
double sigmoid(double x) {
    return 1 / (1 + std::exp(-x));
}

// Sigmoid through the exact identity sigmoid(x) = 0.5 * tanh(x / 2) + 0.5,
// which avoids computing exp directly.
double sigmoid_fast(double x) {
    return 0.5 * std::tanh(0.5 * x) + 0.5;
}

// Rough sigmoid approximation built on the softsign function x / (1 + |x|),
// which needs no transcendental calls at all.
double sigmoid_very_fast(double x) {
    return 0.5 * x / (1 + std::abs(x)) + 0.5;
}

// Derivative of the sigmoid expressed through the activation value:
// if x = sigmoid(y), then sigmoid'(y) = x * (1 - x).
double sigmoid_derivative(double x) {
    return x * (1 - x);
}

// Derivative of tanh as a function of the pre-activation x.
double tanh_derivative(double x) {
    double t = std::tanh(x);
    return 1 - t * t;
}

// Derivative of tanh expressed through the activation value:
// if x = tanh(y), then tanh'(y) = 1 - x * x.
double tanh_computed_derivative(double x) {
    return 1 - x * x;
}

// Square of tanh.
double tanh2(double x) {
    double y = std::tanh(x);
    return y * y;
}

// Plain tanh, wrapped so it can be passed around as a function pointer.
double tanhyp(double x) {
    return std::tanh(x);
}

// Plain exponential, wrapped for the same reason (see softmax below).
double expo(double x) {
    return std::exp(x);
}

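// Usage sketch (illustrative; the vector name is made up): these scalar
// helpers are meant to be applied coefficient-wise with Eigen's unaryExpr,
// exactly as softmax does with expo below:
//   Eigen::VectorXd v(3);
//   v << -1.0, 0.0, 1.0;
//   Eigen::VectorXd activated = v.unaryExpr(&sigmoid);
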
Eigen::VectorXd costfunction(Eigen::VectorXd expected_output,
        Eigen::VectorXd output) {
    // Two cases:
    // 1. The layer size equals the output size: return the element-wise
    //    squared error directly.
    if (expected_output.size() == output.size()) {
        return (expected_output - output)
            .cwiseProduct(expected_output - output);
    } else {
        // 2. Sizes differ: rectangular identity matrices pad or truncate the
        //    output to the expected size, the squared error is computed
        //    there, and the result is mapped back to the layer size.
        int layer_size = output.size();
        return Eigen::MatrixXd::Identity(layer_size, expected_output.size())
            * (((Eigen::MatrixXd::Identity(expected_output.size(), layer_size)
            * output) - expected_output).array().pow(2).matrix());
    }
}

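// Worked example (illustrative values): with a layer of size 3 and an
// expected output of size 2,
//   Eigen::VectorXd expected(2), out(3);
//   expected << 1.0, 0.0;
//   out << 0.9, 0.2, 0.5;
//   Eigen::VectorXd cost = costfunction(expected, out);
// the 2x3 identity keeps only the first two coefficients of out, giving
// squared errors (0.01, 0.04), and the 3x2 identity maps them back, so
// cost == (0.01, 0.04, 0).
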
Eigen::VectorXd costfunction_derivative(Eigen::VectorXd expected_output,
        Eigen::VectorXd output) {
    // Same two cases as in costfunction. Both branches return
    // expected_output - output (the squared-error gradient up to sign and
    // the constant factor 2), mapped back to the layer size when needed.
    if (expected_output.size() == output.size()) {
        return expected_output - output;
    } else {
        int layer_size = output.size();
        return -Eigen::MatrixXd::Identity(layer_size, expected_output.size())
            * ((Eigen::MatrixXd::Identity(expected_output.size(), layer_size)
            * output) - expected_output);
    }
}

Eigen::VectorXd softmax(Eigen::VectorXd output) {
    // Shift by the maximum coefficient before exponentiating; softmax is
    // invariant under this shift and it keeps exp from overflowing.
    output = (output.array() - output.maxCoeff()).matrix().unaryExpr(&expo);
    double exp_sum = output.sum();
    return (1 / exp_sum) * output;
}

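// Example (illustrative values): softmax turns any real vector into a
// probability distribution; the coefficients are positive and sum to 1 up
// to floating-point rounding:
//   Eigen::VectorXd scores(3);
//   scores << 1.0, 2.0, 3.0;
//   Eigen::VectorXd probs = softmax(scores);  // probs.sum() ~= 1.0
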
// Binary step at a fixed level of 0.3.
double threshold(double x) {
    double level = 0.3;
    if (x > level) {
        return 1;
    } else {
        return 0;
    }
}

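// Example (illustrative values): like the other scalar helpers, threshold
// can be applied coefficient-wise to binarize a network output:
//   Eigen::VectorXd out(3);
//   out << 0.1, 0.5, 0.9;
//   Eigen::VectorXd binary = out.unaryExpr(&threshold);  // 0, 1, 1
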
// Returns the name of the Reber-grammar training file; the dual flag
// selects the symmetrical variant.
std::string open_file(bool dual) {
    if (dual) {
        return "symmetrical_reber_train_2.4M.txt";
    }
    return "reber_train_2.4M.txt";
}