#include "distributedmatrix.hpp"
#include "matrix.hpp"
#include "mlp_sgd_distributed.cpp"
#include <mpi.h>
#include <iostream>
#include <fstream>
#include <cassert>
#include <cmath>
#include <functional>
// Utility function: check whether two doubles are approximately equal
bool approxEqual(double a, double b, double epsilon = 1e-10) {
    return std::abs(a - b) < epsilon;
}
// Utility function: check whether two matrices are element-wise equal
bool matricesEqual(const Matrix& a, const Matrix& b, double epsilon = 1e-10) {
    if (a.numRows() != b.numRows() || a.numCols() != b.numCols()) {
        return false;
    }
    for (int i = 0; i < a.numRows(); i++) {
        for (int j = 0; j < a.numCols(); j++) {
            if (!approxEqual(a.get(i, j), b.get(i, j), epsilon)) {
                return false;
            }
        }
    }
    return true;
}

int main(int argc, char** argv) {
    // Initialize MPI only if it has not been initialized already
    int initialized;
    MPI_Initialized(&initialized);
    if (!initialized) {
        MPI_Init(&argc, &argv);
    }

    int rank, numProcs;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numProcs);

    if (rank == 0) {
        std::cout << "Starting DistributedMatrix tests..." << std::endl;
    }

    try {
        std::ofstream csvFile;
        if (rank == 0) {
            csvFile.open("results.csv");
            csvFile << "MatrixSize,AverageSequentialTime,AverageDistributedTime,AverageSpeedUp\n";
        }

        // Sweep over matrix sizes (illustrative values; the original size list is not shown)
        const int sizes[] = {500, 1000, 2000};
        for (int N : sizes) {
            double totalTimeSeq = 0.0;
            double totalTimeDist = 0.0;

            // Average each measurement over 10 runs
            for (int run = 0; run < 10; run++) {
                // Build the full input matrices
                Matrix matrix1Full(N, N);
                Matrix matrix2Full(N, N);
                for (int i = 0; i < N; i++) {
                    for (int j = 0; j < N; j++) {
                        matrix1Full.set(i, j, i * 5 + j + 1);
                        matrix2Full.set(i, j, i * 5 + j + 2);
                    }
                }
                // Create the distributed matrices
                DistributedMatrix matrix1(matrix1Full, numProcs);
                DistributedMatrix matrix2(matrix2Full, numProcs);

                // Time the distributed operation
                MPI_Barrier(MPI_COMM_WORLD);
                double startDistributed = MPI_Wtime();
                Matrix resultDistributed = matrix1.multiplyTransposed(matrix2);
                MPI_Barrier(MPI_COMM_WORLD);
                double endDistributed = MPI_Wtime();
                double timeDist = endDistributed - startDistributed;
                // Time the sequential operation on rank 0 only
                double timeSeq = 0.0;
                if (rank == 0) {
                    double startSequential = MPI_Wtime();
                    Matrix resultSequential = matrix1Full * matrix2Full.transpose();
                    double endSequential = MPI_Wtime();
                    timeSeq = endSequential - startSequential;
                }

                // Broadcast so that every rank holds the same sequential timing
                MPI_Bcast(&timeSeq, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);

                // Accumulate the per-run timings
                totalTimeSeq += timeSeq;
                totalTimeDist += timeDist;
            }

            double avgSeq = totalTimeSeq / 10.0;
            double avgDist = totalTimeDist / 10.0;
            double avgSpeedUp = avgSeq / avgDist;

            if (rank == 0) {
                std::cout << "Avg sequential time: " << avgSeq << " s" << std::endl;
                std::cout << "Avg distributed time: " << avgDist << " s" << std::endl;
                std::cout << "Avg speed up: " << avgSpeedUp << std::endl;
                std::cout << "-----------------------------------------" << std::endl;
                csvFile << N << "," << avgSeq << "," << avgDist << "," << avgSpeedUp << "\n";
            }
        }

        if (rank == 0) {
            csvFile.close();
            std::cout << "Results written to results.csv\n";
        }

        if (rank == 0) {
            std::cout << "All tests passed successfully!" << std::endl;
        }
    }
    catch (std::exception& e) {
        if (rank == 0) {
            std::cerr << "Test failed with exception: " << e.what() << std::endl;
        }
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Finalize();
    return 0;
}
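
// A minimal, hypothetical build-and-run sketch for this benchmark. The source file
// name, target name, and process count below are assumptions, not taken from the
// original project:
//
//   mpic++ -std=c++17 -O2 benchmark_distributedmatrix.cpp -o benchmark
//   mpirun -np 4 ./benchmark
//
// Only rank 0 prints the averaged timings and writes results.csv, with one row per
// benchmarked matrix size.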