Skip to content
Extraits de code Groupes Projets
Valider b775d77f rédigé par Adrien Payen's avatar Adrien Payen
Parcourir les fichiers

commit plots and simulation

parent 649804ae
Aucune branche associée trouvée
Aucune étiquette associée trouvée
Aucune requête de fusion associée trouvée
Aucun aperçu pour ce type de fichier
import matplotlib.pyplot as plt
import numpy as np
from simulate import Simulate as sim
from tmc import TransitionMatrixCalculator as tmc
from markovDecision import MarkovDecisionSolver as mD
def get_results(layouts, circle, n_iterations=100):
    """Compare the Markov-optimal policy against fixed dice strategies.

    For each layout, solve the MDP for the optimal policy, then simulate
    the game under that policy and under four fixed strategies
    (always-safe=1, always-normal=2, always-risky=3, and uniformly random).

    Parameters
    ----------
    layouts : list of board layouts (one list of square codes per board)
    circle : bool-like flag forwarded to the solver/simulator
        (semantics defined in markovDecision/simulate — presumably
        "board wraps around"; confirm against those modules)
    n_iterations : number of simulated games per strategy

    Returns
    -------
    Five parallel lists (one entry per layout) of average turn counts:
    (markov, safe, normal, risky, random).
    """
    results_markov = []
    results_safe = []
    results_normal = []
    results_risky = []
    results_random = []
    for layout in layouts:
        # One action per square; was hard-coded to 15, which silently
        # broke for any board of a different size.
        n_squares = len(layout)
        # Compute the optimal policy for this board.
        expec, policy = mD(layout, circle).solve()
        # Simulate the game under the optimal policy.
        sim_instance = sim(layout, circle)
        results_markov.append(sim_instance.simulate_game(policy, n_iterations))
        # Simulate the fixed strategies on the same board.
        results_safe.append(sim_instance.simulate_game([1] * n_squares, n_iterations))
        results_normal.append(sim_instance.simulate_game([2] * n_squares, n_iterations))
        results_risky.append(sim_instance.simulate_game([3] * n_squares, n_iterations))
        results_random.append(
            sim_instance.simulate_game(np.random.randint(1, 4, size=n_squares), n_iterations)
        )
    return results_markov, results_safe, results_normal, results_risky, results_random
# Exercise get_results on an example board configuration.
layouts = [[0, 0, 3, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 1, 0]]  # example layout
circle = True  # example value for circle
results_markov, results_safe, results_normal, results_risky, results_random = get_results(layouts, circle, n_iterations=100)
# Print the results (they could be written to a file if needed).
for label, values in (
    ("Results Markov:", results_markov),
    ("Results Safe:", results_safe),
    ("Results Normal:", results_normal),
    ("Results Risky:", results_risky),
    ("Results Random:", results_random),
):
    print(label, values)
import random as rd
import numpy as np
from tmc import TransitionMatrixCalculator as tmc
from markovDecision import MarkovDecisionSolver as mD
class Simulate:
    """Monte-Carlo simulator for the Snakes-and-Ladders-style dice game.

    Transition matrices for the three dice (safe, normal, risky) are
    precomputed once from the layout via TransitionMatrixCalculator.
    """

    def __init__(self, layout, circle):
        # layout: list of square codes; code 3 is treated as a "prison"
        # square (extra turns) below. circle is forwarded to the
        # transition-matrix calculator (presumably "board wraps around";
        # confirm against tmc).
        self.layout = layout
        self.circle = circle
        self.tmc_instance = tmc()
        self.safe_dice, self.normal_dice, self.risky_dice = (
            self.tmc_instance.compute_transition_matrix(layout, circle)
        )
        # Indexed by action - 1 (actions are 1=safe, 2=normal, 3=risky).
        self.transition_matrices = [self.safe_dice, self.normal_dice, self.risky_dice]

    def simulate_game(self, strategy, n_iterations=10000):
        """Play `n_iterations` full games from square 0 following `strategy`.

        strategy maps each square index to a dice action (1, 2 or 3).
        Returns the average number of turns needed to reach the final square.
        """
        number_turns = []
        for _ in range(n_iterations):
            total_turns = 0
            state = 0  # initial state
            while state < len(self.layout) - 1:  # until goal state is reached
                action = strategy[state]  # dice chosen on this square
                transition_matrix = self.transition_matrices[int(action) - 1]
                state = np.random.choice(len(self.layout), p=transition_matrix[state])
                if self.layout[state] == 3 and action == 2:
                    # BUG FIX: random.choice takes no `p` keyword (that raised
                    # TypeError); a uniform pick over [1, 2] is what was intended.
                    total_turns += rd.choice([1, 2])
                elif self.layout[state] == 3 and action == 3:
                    total_turns += 2  # risky dice on a prison square: 2 turns
                else:
                    total_turns += 1
            number_turns.append(total_turns)
        return np.mean(number_turns)

    def simulate_state(self, strategy, n_iterations=10000):
        """Average, per starting square, the turns needed to finish the game.

        Returns a numpy array of length len(layout) - 1 with the mean turn
        count when starting from each non-goal square.
        """
        number_mean = []
        for _ in range(n_iterations):
            number_turns = []
            # Use a separate loop variable: the inner while mutates `state`,
            # which previously clobbered the for-loop variable.
            for start in range(len(self.layout) - 1):
                total_turns = 0
                state = start
                while state < len(self.layout) - 1:
                    # BUG FIX: `action` and `transition_matrix` were never
                    # assigned here (NameError on first use); derive them from
                    # the strategy exactly as simulate_game does. The stray
                    # debug prints are removed as well.
                    action = strategy[state]
                    transition_matrix = self.transition_matrices[int(action) - 1]
                    state = np.random.choice(len(self.layout), p=transition_matrix[state])
                    if self.layout[state] == 3 and action == 2:
                        # Same fix as simulate_game: uniform pick, no `p` kwarg.
                        total_turns += rd.choice([1, 2])
                    elif self.layout[state] == 3 and action == 3:
                        total_turns += 2
                    else:
                        total_turns += 1
                number_turns.append(total_turns)
            number_mean.append(number_turns)
        return np.mean(number_mean, axis=0)
Fichier déplacé
Fichier déplacé
0% Chargement en cours ou .
You are about to add 0 people to the discussion. Proceed with caution.
Terminez d'abord l'édition de ce message.
Veuillez vous inscrire ou vous connecter pour commenter