diff --git a/P2/distributedmatrix.cpp b/P2/distributedmatrix.cpp
index d29e0c0148bbb0ccc8142f71cd9d2317b5e16957..3d6d08c198abdd93aa47416b9a0e7f480370b994 100644
--- a/P2/distributedmatrix.cpp
+++ b/P2/distributedmatrix.cpp
@@ -54,14 +54,14 @@ int DistributedMatrix::numCols() const {
 
 double DistributedMatrix::get(int i, int j) const {
     if (j < startCol || j >= startCol + localCols) {
-        throw std::out_of_range("Attempt to access non-local column in get()");
+        throw std::out_of_range("Out of range.");
     }
     return localData.get(i, j - startCol);
 }
 
 void DistributedMatrix::set(int i, int j, double value) {
     if (j < startCol || j >= startCol + localCols) {
-        throw std::out_of_range("Attempt to access non-local column in set()");
+        throw std::out_of_range("Out of range.");
     }
     localData.set(i, j - startCol, value);
 }
@@ -79,16 +79,17 @@ int DistributedMatrix::localColIndex(int globalColIndex) const {
 }
 
 int DistributedMatrix::ownerProcess(int globalColIndex) const {
-    int remainingCols = globalCols % numProcesses;
+    // Gather each rank's starting column; MPI_Allgather is collective, so every rank must call ownerProcess() together.
+    std::vector<int> localStartProcesses(numProcesses);
 
-    for (int p = 0; p < numProcesses; ++p) {
-        int start = p * localCols + min(p, remainingCols);
-        int cols = localCols + (p < remainingCols ? 1 : 0);
-        if (globalColIndex >= start && globalColIndex < start + cols) {
-            return p;
+    MPI_Allgather(&startCol, 1, MPI_INT, localStartProcesses.data(), 1, MPI_INT, MPI_COMM_WORLD);
+
+    for (int p = 1; p < numProcesses; ++p) {
+        if (globalColIndex >= localStartProcesses[p-1] && globalColIndex < localStartProcesses[p]) {
+            return p - 1;
         }
     }
-    return 0;
+    return numProcesses - 1; // globalColIndex lies in the last rank's column block
 }
 
 const Matrix& DistributedMatrix::getLocalData() const {
@@ -240,7 +241,7 @@ Matrix DistributedMatrix::multiplyTransposed(const DistributedMatrix &other) con
 }
 
 void sync_matrix(Matrix *matrix, int rank, int src) {
-    // Utiliser const_cast pour supprimer le qualificatif const, car MPI_Bcast nécessite un void*
+    // Use const_cast to strip the const qualifier, because MPI_Bcast requires a non-const void*
     MPI_Bcast(const_cast<double*>(matrix->getData().data()), matrix->numRows() * matrix->numCols(), MPI_DOUBLE, src, MPI_COMM_WORLD);
 }