Commit e1d1af85 authored by hugopiq

Modify confusion matrix handling

parent 328235bf
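The change threads a 10×10 confusion matrix through the accuracy helpers and their ANN, CART and SVM front ends: compute_accuracy now fills the matrix it is handed, and each main() prints it with print_2D_array. For orientation only, a minimal standalone sketch of that pattern with made-up names (truth, predicted) and invented labels, not the repository's actual helpers:

#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    // toy labels for a 10-class problem (values invented for illustration)
    std::vector<int> truth = {0, 1, 2, 2, 3};
    std::vector<int> predicted = {0, 1, 2, 1, 3};
    int matrix[10][10] = {0}; // rows: true class, columns: predicted class
    for (std::size_t i = 0; i < truth.size(); i++)
        matrix[truth[i]][predicted[i]]++;
    // accuracy equals the trace of the confusion matrix over the sample count
    int correct = 0;
    for (int c = 0; c < 10; c++)
        correct += matrix[c][c];
    std::cout << "Accuracy: " << static_cast<double>(correct) / truth.size() << "\n";
    return 0;
}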
@@ -55,21 +55,26 @@ def train_ANN(saveWeighs=False, saveAlgo=False):
history = model.fit(X_train, labelInd_train,
validation_data=(X_val, labelInd_val), epochs=15)
# performance metrics:
# compute performance metrics
result = model.evaluate(x=X_test, y=labelInd_test)
print(dict(zip(model.metrics_names, result)))
# learning curves
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 2) # set the vertical range to [0, 2]
plt.show()
# pd.DataFrame(history.history).plot(figsize=(8, 5))
# plt.grid(True)
# plt.gca().set_ylim(0, 2) # set the vertical range to [0, 2]
# plt.show()
# Save weights to a .h file
list_weights = []
for layer in model.layers:
weights = layer.get_weights()
list_weights.append(weights)
# print(len(list_weights[0][0]))
print(np.shape(list_weights[0][0]))
print(np.shape(list_weights[1][0]))
print(np.shape(list_weights[2][0]))
print(np.shape(list_weights[3][0]))
mean = scaler.mean_
std = np.sqrt(scaler.var_)
mean = transformListToStr(mean)
@@ -111,7 +116,6 @@ def transformListToStr(list):
def save_file_as_csv(data, name_folder):
genres = data["Classes"].tolist()
paths = data["Paths"].tolist()
print(genres)
@@ -122,18 +126,4 @@ def save_file_as_csv(data, name_folder):
if __name__ == "__main__":
# # links with google drive and colab
# IN_COLAB = 'google.colab' in sys.modules
# if IN_COLAB:
# from google.colab import drive
# drive.mount('/content/gdrive')
# # replace with the path to this notebook starting from My Drive/...
# code_folder = '/content/gdrive/My Drive/ENSTA ROB 2020-2022/3A 2021-2022/Embedded Machine Learning'
# %cd "$code_folder"
# # !ls "$code_folder"
# model, history = train_ANN(saveAlgo=False, saveWeighs=False)
# save_model(model, history)
model, history = train_ANN(saveAlgo=False, saveWeighs=True)
# save_model(model, history)
model, history = train_ANN(saveWeighs=True)
@@ -103,7 +103,6 @@ int ANNModelWeights(std::map<FTYPE, DataVector>
});
reLu(X2);
// layer 3
DataVector X3(w3.size());
std::transform(X3.cbegin(),
X3.cend(),
@@ -116,7 +115,7 @@ int ANNModelWeights(std::map<FTYPE, DataVector>
softmaxF(X3);
auto max = std::max_element(std::begin(X3), std::end(X3));
for (int i = 0; i < X3.size(); i++)
for (size_t i = 0; i < X3.size(); i++)
{
if (*max == X3[i])
{
......
@@ -17,9 +17,21 @@ int main(int argc, char **argv)
}
std::map<FTYPE, DataVector> features = compute_features_for(file_path);
int result = ANNModelWeights(features);
int matrix[10][10] = {0}; // zero-initialised confusion matrix (rows: true genre, columns: predicted genre)
std::cout << "Style: " << genres[result] << "\n";
std::cout << "Compute accuracy... \n";
real accuracy = compute_accuracy("/home/hugo/Documents/embedded-machine-learning/ANN/file_test.csv", (vFunctionCall)ANNModelWeights);
real accuracy = compute_accuracy("/home/hugo/Documents/embedded-machine-learning/ANN/file_test.csv", (vFunctionCall)ANNModelWeights, matrix);
std::cout << "Accuracy: " << accuracy << "\n";
std::cout << "Confusion matrix: \n";
print_2D_array(matrix);
return 0;
}
\ No newline at end of file
@@ -17,8 +17,20 @@ int main(int argc, char **argv)
std::map<FTYPE, DataVector> features = compute_features_for(file_path);
std::string result = cartModel(features);
std::cout << "Compute accuracy... \n";
real accuracy = compute_accuracy_CART("/home/hugo/Documents/embedded-machine-learning/CART/file_test.csv", (vFunctionCallCART)cartModel);
int matrix[10][10] = {0}; // zero-initialised confusion matrix (rows: true genre, columns: predicted genre)
real accuracy = compute_accuracy_CART("/home/hugo/Documents/embedded-machine-learning/CART/file_test.csv", (vFunctionCallCART)cartModel, matrix);
std::cout << "Accuracy: " << accuracy << "\n";
std::cout << result << std::endl;
std::cout << "Confusion matrix: \n";
print_2D_array(matrix);
return 0;
}
\ No newline at end of file
@@ -12,7 +12,7 @@ typedef int (*vFunctionCall)(std::map<FTYPE, DataVector>
typedef std::string (*vFunctionCallCART)(std::map<FTYPE, DataVector>
&features);
real compute_accuracy(std::string name_file, vFunctionCall function)
real compute_accuracy(std::string name_file, vFunctionCall function, int matrix[][10])
{
std::ifstream myFile(name_file);
std::vector<std::pair<std::string, std::string>> data;
@@ -37,23 +37,32 @@ real compute_accuracy(std::string name_file, vFunctionCall function)
}
myFile.close();
int result_model;
int result_real;
real compt_good_result = 0.;
for (size_t i = 0; i < data.size(); i++)
{
std::filesystem::path file_path = data[i].second;
std::map<FTYPE, DataVector> features = compute_features_for(file_path);
std::string result_model = genres[function(features)];
// std::cout << result_model << ":" << data[i].first << std::endl;
if (result_model == data[i].first)
result_model = function(features);
if (result_model == genres_inv[data[i].first])
{
compt_good_result++;
}
}
real result = compt_good_result / (real)data.size();
// second pass: fill the confusion matrix (rows: true genre, columns: predicted genre)
for (size_t i = 0; i < data.size(); i++)
{
std::filesystem::path file_path = data[i].second;
std::map<FTYPE, DataVector> features = compute_features_for(file_path);
result_model = function(features);
result_real = genres_inv[data[i].first];
matrix[result_real][result_model]++;
}
return result;
}
real compute_accuracy_CART(std::string name_file, vFunctionCallCART function)
real compute_accuracy_CART(std::string name_file, vFunctionCallCART function, int matrix[][10])
{
std::ifstream myFile(name_file);
std::vector<std::pair<std::string, std::string>> data;
@@ -92,44 +101,18 @@ real compute_accuracy_CART(std::string name_file, vFunctionCallCART function)
}
}
real result = compt_good_result / (real)data.size();
return result;
}
void compute_confusion_matrix(std::string name_file, vFunctionCall function, std::array<std::array<int, 10>, 10> &matrix)
{
std::ifstream myFile(name_file);
std::vector<std::pair<std::string, std::string>> data;
if (!myFile.is_open())
throw std::runtime_error("Could not open file");
std::string line, line2;
std::string val;
// Read each line
while (std::getline(myFile, line))
{
std::pair<std::string, std::string> pair;
std::stringstream s(line);
std::getline(s, line2, ';');
std::stringstream ss(line2);
ss >> val;
pair.first = val; // style
std::getline(s, line2, ';');
std::stringstream ss2(line2);
ss2 >> val;
pair.second = val; // paths file
data.push_back(pair);
}
myFile.close();
for (size_t i = 0; i < data.size(); i++)
{
std::filesystem::path file_path = data[i].second;
std::map<FTYPE, DataVector> features = compute_features_for(file_path);
int result_model = function(features);
int result_model = genres_inv[function(features)];
int result_real = genres_inv[data[i].first];
matrix[result_real][result_model]++;
}
return result;
}
void print_2D_array(std::array<std::array<int, 10>, 10> &matrix)
void print_2D_array(int matrix[][10])
{
std::cout << "{";
for (int i = 0; i < 10; ++i)
......
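A side note on the raw-array signatures introduced above (compute_accuracy and print_2D_array now take int matrix[][10] instead of std::array): a 2D C array passed to a function decays to a pointer to its rows, so the inner bound must be written out while the outer one may be left empty. A minimal sketch of that convention, using an illustrative printMatrix rather than the repository's print_2D_array:

#include <iostream>

// only the inner dimension (10) is required; the outer dimension decays to a pointer
void printMatrix(int matrix[][10], int rows)
{
    for (int i = 0; i < rows; ++i)
    {
        for (int j = 0; j < 10; ++j)
            std::cout << matrix[i][j] << (j == 9 ? '\n' : ' ');
    }
}

int main()
{
    int matrix[10][10] = {0}; // zero-initialised, as in the main() diffs
    matrix[3][3] = 5;         // invented entry: class 3 predicted as class 3 five times
    printMatrix(matrix, 10);
    return 0;
}

In the diffs, the same plain int[10][10] is declared in each main(), filled by compute_accuracy and then printed.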
@@ -13,22 +13,22 @@ from sklearn.model_selection import GridSearchCV
def trainSVM(saveWeighs=False, saveAlgo=False):
# Get data
dataset = 'build/Extraction/features.csv'
df = pd.read_csv(dataset, header=0)
features = df.columns.values[:-2]
# print(features)
# features = df.columns.values[:-2]
Y = df.Style.values
X = df.values
classes = np.unique(Y)
# TRAIN/TEST SPLIT
# Split set
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.33, random_state=42)
X_test, X_val, Y_test, Y_val = train_test_split(
X_test, Y_test, test_size=0.33, random_state=42)
# Save the test set in order to compute accuracy
X_train = np.delete(X_train, [-1, -2], axis=1)
X_val = np.delete(X_val, [-1, -2], axis=1)
print(Y_test)
new_df = pd.DataFrame(X_test)
files = new_df.iloc[:, -2:]
files.columns = ["Classes", "Paths"]
@@ -61,12 +61,12 @@ def trainSVM(saveWeighs=False, saveAlgo=False):
with open('/home/hugo/Documents/embedded-machine-learning/SVM/SVMWeight.h', 'w') as f:
f.write("#ifndef SVMWEIGHT_H\n#define SVMWEIGHT_H\n")
f.write("#include <vector>\n")
f.write("const std::vector<std::vector<double>> coefs=" +
transformArrayToStr(coef)+";")
f.write("const std::vector<double> intercept=" +
transformListToStr(intercept)+";")
f.write("const std::vector<double> meanNorm=" + str(mean)+";")
f.write("const std::vector<double> stdNorm=" + str(std)+";")
f.write("const std::vector<DataVector> coefs=" +
transformArrayToStr(coef)+";\n")
f.write("const DataVector intercept=" +
transformListToStr(intercept)+";\n")
f.write("const DataVector meanNorm=" + str(mean)+";\n")
f.write("const DataVector stdNorm=" + str(std)+";\n")
f.write("#endif")
print("Save weights in embedded-machine-learning/SVM/SVMWeight.h file!")
if saveAlgo:
@@ -75,8 +75,8 @@ def trainSVM(saveWeighs=False, saveAlgo=False):
f.write("#ifndef APPLYSVMALGO_H\n#define APPLYSVMALGO_H\n#include <numeric>\n#include <fstream>\n#include \"../Extraction/features_extraction.h\"\n#include \"../Helpers/globals.h\"\n#include \"../Helpers/signal.h\"\n#include \"../Helpers/au_reading.h\"\n#include <typeinfo>\n")
f.write(
"int SVMModelAlgo(std::map < FTYPE, DataVector > &features){")
f.write("const std::vector<double> meanNorm=" + str(mean)+";")
f.write("const std::vector<double> stdNorm=" + str(std)+";")
f.write("const DataVector meanNorm=" + str(mean)+";")
f.write("const DataVector stdNorm=" + str(std)+";")
f.write(
"DataVector featureVector = features[FTYPE::BINAVG];featureVector.insert(featureVector.end(), features[FTYPE::BINSTDEV].begin(), features[FTYPE::BINSTDEV].end());")
f.write(
......
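The saveWeighs branch above regenerates embedded-machine-learning/SVM/SVMWeight.h from the trained scikit-learn model, now emitting DataVector instead of std::vector<double> and a newline after each constant. Purely to illustrate the shape of the generated file, a hand-written stand-in with invented numbers, two classes and three features (the real file has one coefficient row per genre; DataVector is assumed to be the project's alias for std::vector<double>):

#ifndef SVMWEIGHT_H
#define SVMWEIGHT_H
#include <vector>
typedef std::vector<double> DataVector; // assumption: the project normally defines this alias in its own headers
// one coefficient row per class (values invented for illustration)
const std::vector<DataVector> coefs = {{0.12, -0.30, 0.05},
                                       {-0.07, 0.22, 0.41}};
const DataVector intercept = {0.10, -0.25};
// per-feature normalisation constants exported by the Python scaler
const DataVector meanNorm = {1.5, 0.2, 3.7};
const DataVector stdNorm = {0.9, 0.1, 1.2};
#endif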
@@ -27,7 +27,7 @@ int SVMModelWeights(std::map<FTYPE, DataVector>
return (c - meanFeature[indexM]) / stdFeature[indexS];
});
// SVM
int nbClass = 10;
int nbClass = 0;
for (size_t i = 0; i < coefs.size(); i++)
{
// for each class, i.e. one coefficient row plus its intercept
......
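The loop cut off above walks coefs one row at a time, pairing each class's coefficient row with its intercept. One plausible reading of that scheme is a one-vs-rest linear scorer: after normalising the features, compute w_c · x + b_c for every class c and keep the argmax. A sketch under that assumption, with illustrative names rather than the repository's exact SVMModelWeights body:

#include <cstddef>
#include <iostream>
#include <vector>

typedef std::vector<double> DataVector; // stand-in for the project's typedef

// return the index of the class with the largest linear score w_c . x + b_c
int predictClass(const DataVector &featureVector,
                 const std::vector<DataVector> &coefs,
                 const DataVector &intercept)
{
    int nbClass = 0;
    double bestScore = 0.0;
    for (std::size_t c = 0; c < coefs.size(); c++)
    {
        double score = intercept[c];
        for (std::size_t j = 0; j < featureVector.size(); j++)
            score += coefs[c][j] * featureVector[j];
        if (c == 0 || score > bestScore)
        {
            bestScore = score;
            nbClass = static_cast<int>(c);
        }
    }
    return nbClass;
}

int main()
{
    DataVector x = {0.3, -1.2, 0.8};                   // invented, already-normalised features
    std::vector<DataVector> w = {{0.12, -0.30, 0.05},  // same invented rows as the
                                 {-0.07, 0.22, 0.41}}; // SVMWeight.h stand-in above
    DataVector b = {0.10, -0.25};
    std::cout << "Predicted class: " << predictClass(x, w, b) << "\n";
    return 0;
}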
@@ -22,11 +22,18 @@ int main(int argc, char **argv)
int result3 = SVMModelAlgo(features);
std::cout << genres[result3] << std::endl;
std::cout << "Compute accuracy... \n";
real accuracy = compute_accuracy("/home/hugo/Documents/embedded-machine-learning/SVM/file_test.csv", (vFunctionCall)SVMModelWeights);
int matrix[10][10] = {0}; // zero-initialised confusion matrix (rows: true genre, columns: predicted genre)
real accuracy = compute_accuracy("/home/hugo/Documents/embedded-machine-learning/SVM/file_test.csv", (vFunctionCall)SVMModelWeights, matrix);
std::cout << "Accuracy: " << accuracy << "\n";
std::array<std::array<int, 10>, 10> matrix = {{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}};
std::cout << "Compute confusion matrix.. \n";
compute_confusion_matrix("/home/hugo/Documents/embedded-machine-learning/SVM/file_test.csv", (vFunctionCall)SVMModelWeights, matrix);
std::cout << "Confusion matrix: \n";
print_2D_array(matrix);
return 0;
......