diff --git a/.DS_Store b/.DS_Store
index 49ec909d4a2edc342593b083d5485c3037bef0ed..837b005cf49930c5ead4ac78ca4b5abed2e970f5 100644
Binary files a/.DS_Store and b/.DS_Store differ
diff --git a/plotting.py b/plotting.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ce647646ee75ca2845919db6734d4203c2a1ee2
--- /dev/null
+++ b/plotting.py
@@ -0,0 +1,41 @@
+import matplotlib.pyplot as plt
+import numpy as np
+from simulate import Simulate as sim
+from tmc import TransitionMatrixCalculator as tmc
+from markovDecision import MarkovDecisionSolver as mD
+
def get_results(layouts, circle, n_iterations=100):
    """Compare the Markov-optimal policy against fixed dice strategies.

    Parameters
    ----------
    layouts : list
        Board layouts; each is a sequence of square types.
    circle : bool
        Whether the board is circular (wraps past the goal).
    n_iterations : int
        Number of simulated games per strategy.

    Returns
    -------
    tuple of five lists (one entry per layout): mean number of turns for
    the Markov-optimal, always-safe, always-normal, always-risky and
    uniformly random strategies.
    """
    results_markov = []
    results_safe = []
    results_normal = []
    results_risky = []
    results_random = []

    for layout in layouts:
        # One dice choice per square; derive from the layout instead of
        # hard-coding 15 so boards of any size work.
        n_squares = len(layout)

        # Compute the optimal policy with the Markov decision solver.
        expec, policy = mD(layout, circle).solve()

        # Simulate the game under the optimal policy.
        sim_instance = sim(layout, circle)
        results_markov.append(sim_instance.simulate_game(policy, n_iterations))

        # Fixed strategies: always safe (1), normal (2), risky (3),
        # plus a uniformly random dice choice per square.
        results_safe.append(sim_instance.simulate_game([1] * n_squares, n_iterations))
        results_normal.append(sim_instance.simulate_game([2] * n_squares, n_iterations))
        results_risky.append(sim_instance.simulate_game([3] * n_squares, n_iterations))
        results_random.append(sim_instance.simulate_game(np.random.randint(1, 4, size=n_squares), n_iterations))

    return results_markov, results_safe, results_normal, results_risky, results_random
+
# Run the strategy comparison on an example layout and print the results.
# Guarded so importing this module does not trigger a full simulation.
if __name__ == "__main__":
    layouts = [[0, 0, 3, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 1, 0]]  # example layout
    circle = True  # circular board variant
    results_markov, results_safe, results_normal, results_risky, results_random = get_results(layouts, circle, n_iterations=100)

    # Print the results (redirect to a file if needed).
    print("Results Markov:", results_markov)
    print("Results Safe:", results_safe)
    print("Results Normal:", results_normal)
    print("Results Risky:", results_risky)
    print("Results Random:", results_random)
diff --git a/simulate.py b/simulate.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbaa211f8998013a57c5536726a1a0c083a5baf3
--- /dev/null
+++ b/simulate.py
@@ -0,0 +1,61 @@
+import random as rd
+import numpy as np
+from tmc import TransitionMatrixCalculator as tmc
+from markovDecision import MarkovDecisionSolver as mD
+
+
class Simulate:
    """Monte-Carlo simulator for the dice board game.

    Transition matrices for the three dice (safe, normal, risky) are
    precomputed once from the layout by TransitionMatrixCalculator.
    """

    def __init__(self, layout, circle):
        self.layout = layout
        self.circle = circle
        self.tmc_instance = tmc()
        self.safe_dice, self.normal_dice, self.risky_dice = self.tmc_instance.compute_transition_matrix(layout, circle)
        # Index 0/1/2 corresponds to dice action 1/2/3.
        self.transition_matrices = [self.safe_dice, self.normal_dice, self.risky_dice]

    def _turn_cost(self, state, action):
        """Return the number of turns consumed after landing on `state`.

        Landing on a square of type 3 costs one extra turn with
        probability 0.5 under the normal die (action 2) and always
        under the risky die (action 3); otherwise a turn costs 1.
        """
        if self.layout[state] == 3 and action == 2:
            # BUG FIX: random.choice() takes no `p` kwarg (that is
            # numpy.random.choice); a plain choice over [1, 2] is
            # already uniform 50/50.
            return rd.choice([1, 2])
        if self.layout[state] == 3 and action == 3:
            return 2
        return 1

    def simulate_game(self, strategy, n_iterations=10000):
        """Return the mean number of turns to reach the goal from square 0.

        strategy: one dice choice (1, 2 or 3) per square.
        """
        number_turns = []
        for _ in range(n_iterations):
            total_turns = 0
            state = 0  # initial state

            while state < len(self.layout) - 1:  # until goal state is reached
                action = strategy[state]  # get action according to strategy
                transition_matrix = self.transition_matrices[int(action) - 1]
                state = np.random.choice(len(self.layout), p=transition_matrix[state])
                total_turns += self._turn_cost(state, action)

            number_turns.append(total_turns)

        return np.mean(number_turns)

    def simulate_state(self, strategy, n_iterations=10000):
        """Return, for each starting square, the mean number of turns to the goal.

        Result is an array of length len(layout) - 1, averaged over
        `n_iterations` independent runs per starting square.
        """
        number_mean = []
        for _ in range(n_iterations):
            number_turns = []

            for start in range(len(self.layout) - 1):
                total_turns = 0
                state = start

                while state < len(self.layout) - 1:
                    # BUG FIX: the original body referenced `action` and
                    # `transition_matrix` without ever assigning them
                    # (NameError) and left debug prints behind.
                    action = strategy[state]
                    transition_matrix = self.transition_matrices[int(action) - 1]
                    state = np.random.choice(len(self.layout), p=transition_matrix[state])
                    total_turns += self._turn_cost(state, action)

                number_turns.append(total_turns)

            number_mean.append(number_turns)

        return np.mean(number_mean, axis=0)
diff --git a/plot.py b/test_files/plot.py
similarity index 100%
rename from plot.py
rename to test_files/plot.py
diff --git a/validation.py b/test_files/validation.py
similarity index 100%
rename from validation.py
rename to test_files/validation.py