This is an example of how to use the netdem library.
#include <cmath>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

using namespace std;
void TrainCodedNetSDF() {
vector<string> template_names;
ifstream file("template_dict.txt");
if (file.is_open()) {
string line;
while (getline(file, line)) {
template_names.push_back(line);
}
file.close();
} else {
cout << "file template_dict.txt not exist" << endl;
return;
}
int num_templates = template_names.size();
vector<arma::mat> dataset(num_templates);
for (int i = 0; i < template_names.size(); i++) {
bool loadedDataset = mlpack::data::Load(
template_names[i] + "/archived/dataset_SDF.txt", dataset[i], true);
if (!loadedDataset) {
cout << "loading data erros ..." << endl;
return;
}
cout << template_names[i] << ": " << dataset[i].n_cols << " samples"
<< endl;
}
cout << "finished loading data ..." << endl;
vector<int> num_train_samples_each(num_templates),
num_test_samples_each(num_templates);
arma::mat train_data, test_data;
for (int i = 0; i < num_templates; i++) {
arma::mat tmp_train_data, tmp_test_data;
mlpack::data::Split(dataset[i], tmp_train_data, tmp_test_data, 0.1);
num_train_samples_each[i] = tmp_train_data.n_cols;
num_test_samples_each[i] = tmp_test_data.n_cols;
train_data.insert_cols(train_data.n_cols, tmp_train_data);
test_data.insert_cols(test_data.n_cols, tmp_test_data);
}
cout << "finished preparing training and testing data ..." << endl;
int num_x{3}, num_y{1};
arma::mat train_x = train_data.rows(0, num_x - 1);
arma::mat train_y = train_data.row(num_x);
arma::mat test_x = test_data.rows(0, num_x - 1);
arma::mat test_y = test_data.row(num_x);
const size_t num_latent_code = 5;
const int num_nodes = 50, num_hidden_layers = 2;
vector<VecNd<num_latent_code>> latent_code(num_templates);
auto &random_engine = RandomEngine::GetInstance();
for (int i = 0; i < num_templates; i++) {
auto tmp_data =
random_engine.GetUniformDistribution(-1.0, 1.0, num_latent_code);
for (int j = 0; j < num_latent_code; j++) {
latent_code[i][j] = tmp_data[j];
}
}
arma::mat train_x_coded(num_latent_code + train_x.n_rows, train_x.n_cols);
arma::mat test_x_coded(num_latent_code + test_x.n_rows, test_x.n_cols);
int id_start = 0;
for (int ii = 0; ii < num_templates; ii++) {
for (int i = 0; i < num_latent_code; i++) {
for (int j = 0; j < num_train_samples_each[ii]; j++) {
train_x_coded(i, id_start + j) = latent_code[ii][i];
}
}
id_start += num_train_samples_each[ii];
}
for (int i = num_latent_code; i < train_x_coded.n_rows; i++) {
for (int j = 0; j < train_x_coded.n_cols; j++) {
train_x_coded(i, j) = train_x(i - num_latent_code, j);
}
}
id_start = 0;
for (int ii = 0; ii < num_templates; ii++) {
for (int i = 0; i < num_latent_code; i++) {
for (int j = 0; j < num_test_samples_each[ii]; j++) {
test_x_coded(i, id_start + j) = latent_code[ii][i];
}
}
id_start += num_test_samples_each[ii];
}
for (int i = num_latent_code; i < test_x_coded.n_rows; i++) {
for (int j = 0; j < test_x_coded.n_cols; j++) {
test_x_coded(i, j) = test_x(i - num_latent_code, j);
}
}
model.
AddLayer(MLPackUtils::LayerType::Linear, num_nodes);
model.
AddLayer(MLPackUtils::LayerType::LeakyReLU);
for (int i = 0; i < num_hidden_layers - 1; i++) {
model.
AddLayer(MLPackUtils::LayerType::Linear, num_nodes);
model.
AddLayer(MLPackUtils::LayerType::LeakyReLU);
}
model.
AddLayer(MLPackUtils::LayerType::Linear, train_y.n_rows);
cout << "finished creating ann model ..." << endl << endl;
double test_accuracy_best{1.0e8};
for (int iter = 0; iter < 1000; iter++) {
for (int sub_iter = 0; sub_iter < 10; sub_iter++) {
id_start = 0;
for (int ii = 0; ii < num_templates; ii++) {
for (int i = 0; i < num_latent_code; i++) {
for (int j = 0; j < num_train_samples_each[ii]; j++) {
train_x_coded(i, id_start + j) = latent_code[ii][i];
}
}
id_start += num_train_samples_each[ii];
}
id_start = 0;
for (int ii = 0; ii < num_templates; ii++) {
for (int i = 0; i < num_latent_code; i++) {
for (int j = 0; j < num_test_samples_each[ii]; j++) {
test_x_coded(i, id_start + j) = latent_code[ii][i];
}
}
id_start += num_test_samples_each[ii];
}
model.
Train(train_x_coded, train_y);
double learning_rate{0.0001};
auto pred_y = model.
Predict(train_x_coded);
id_start = 0;
for (int ii = 0; ii < num_templates; ii++) {
for (int j = 0; j < num_train_samples_each[ii]; j++) {
auto jj = id_start + j;
auto dydx = model.
Gradient(train_x_coded.col(jj));
for (int i = 0; i < num_latent_code; i++) {
latent_code[ii][i] -=
learning_rate * (pred_y(0, jj) - train_y(0, jj)) * dydx(0, i);
if (abs(latent_code[ii][i]) > 10.0) {
latent_code[ii][i] =
random_engine.GetUniformDistribution(-1.0, 1.0);
}
}
}
id_start += num_train_samples_each[ii];
}
}
for (auto &tmp_code : latent_code) {
cout << tmp_code << endl;
}
auto pred_y = model.
Predict(train_x_coded);
double train_accuracy = MLPackUtils::GetMAE(train_y, pred_y);
cout << "train mae: " << train_accuracy << endl;
pred_y = model.
Predict(test_x_coded);
double test_accuracy = MLPackUtils::GetMAE(pred_y, test_y);
cout << "test mae: " << test_accuracy << endl;
arma::mat tmp(num_templates, num_latent_code);
for (int i = 0; i < num_templates; i++) {
for (int j = 0; j < num_latent_code; j++) {
tmp(i, j) = latent_code[i][j];
}
}
filesystem::create_directory("training");
mlpack::data::Save("training/latent_code_" + to_string(iter) + ".txt", tmp,
false, false, mlpack::data::FileType::CSVASCII);
model.
Save(
"training/ann_SDF_" + to_string(iter) +
".xml",
"SDF");
if (test_accuracy < test_accuracy_best) {
test_accuracy_best = test_accuracy;
mlpack::data::Save("latent_code.txt", tmp, false, false,
mlpack::data::FileType::CSVASCII);
model.
Save(
"ann_SDF.xml",
"SDF");
}
cout << "finished training iter " << iter << endl << endl;
}
}
A class that represents a feedforward neural network for regression.
Definition regression_net.hpp:21
int epochs
The number of epochs used for training.
Definition regression_net.hpp:42
double stop_tol
The stopping tolerance used for determining when to stop training.
Definition regression_net.hpp:45
arma::mat Gradient(const arma::mat &data_x)
Calculates the gradient of the neural network model with respect to input data.
Definition regression_net.cpp:68
void Save(std::string const &filename, std::string const &label)
Saves the neural network model to disk.
Definition regression_net.cpp:99
int batch_size
The batch size used for mini-batch gradient descent during training.
Definition regression_net.hpp:30
double step_size
The step size used for optimization during training.
Definition regression_net.hpp:27
void AddLayer(MLPackUtils::LayerType layer_name,...)
Adds a layer to the neural network model.
Definition regression_net.cpp:15
void Train(const arma::mat &data_x, const arma::mat &data_y)
Trains the neural network model with data.
Definition regression_net.cpp:51
arma::mat Predict(const arma::mat &data_x)
Predicts with the neural network model using input data.
Definition regression_net.cpp:62
bool enable_logging
Whether or not to enable logging during training.
Definition regression_net.hpp:48
Definition bond_entry.hpp:7