# NOTE(review): removed merge-conflict residue ("Newer" / "Older") that was
# left at the top of the file — it was not valid Python.
import random as rd

import numpy as np

from markovDecision import MarkovDecisionSolver as mD
from tmc import TransitionMatrixCalculator as tmc
class Validation:
    """Empirical validation of the board-game MDP solver.

    Simulates games on *layout* under several fixed strategies (always-safe,
    always-normal, always-risky, random, and the value-iteration optimal
    policy) and compares the empirical mean number of turns with the
    expected values computed by value iteration.

    Strategy encoding: one die choice per square — 1 = safe, 2 = normal,
    3 = risky (``action - 1`` indexes the transition-matrix list).

    NOTE(review): the source of this class arrived with indentation stripped
    and several statements missing; the reconstructed fragments are marked
    with NOTE(review) comments below — confirm against project history.
    """

    def __init__(self, layout, circle=False):
        """Precompute transition matrices, the optimal policy and the
        reference strategies.

        layout : per-square type codes; squares whose code is 3 incur extra
                 turn costs (see the trap handling in ``simulate_game``).
        circle : forwarded to the transition-matrix builders and the solver —
                 presumably "must land exactly on the last square";
                 TODO confirm against TransitionMatrixCalculator.
        """
        self.layout = layout
        self.circle = circle
        self.tmc_instance = tmc()
        # One transition matrix per die type.
        self.safe_dice = self.tmc_instance._compute_safe_matrix()
        self.normal_dice, _ = self.tmc_instance._compute_normal_matrix(layout, circle)
        self.risky_dice, _ = self.tmc_instance._compute_risky_matrix(layout, circle)

        # Value iteration: expected turns per state + optimal die per state.
        solver = mD(self.layout, self.circle)
        self.expec, self.optimal_policy = solver.solve()

        # Constant reference strategies (same die on every square).
        self.safe_strategy = [1] * len(layout)
        self.normal_strategy = [2] * len(layout)
        self.risky_strategy = [3] * len(layout)
        # BUG FIX: the original drew from [0, 1, 2, 3]; 0 is not a valid die
        # and, via action - 1 == -1, silently selected the risky matrix.
        self.random_strategy = [rd.choice([1, 2, 3]) for _ in range(len(layout))]

        # Extra-turn cost on trap squares (layout code 3), per die type.
        # NOTE(review): the dict assignment was missing from the corrupted
        # source (only the entries survived); reconstructed here.
        self.costs_by_dice_type = {
            'SafeDice': [0] * len(layout),
            'NormalDice': [0] * len(layout),
            'RiskyDice': [0] * len(layout),
        }
        for i, die_type in enumerate(self.layout):
            self.costs_by_dice_type['SafeDice'][i] = 1 if die_type == 3 else 0
            self.costs_by_dice_type['NormalDice'][i] = 2 if die_type == 3 else 0
            self.costs_by_dice_type['RiskyDice'][i] = 3 if die_type == 3 else 0

    def simulate_game(self, strategy, n_iterations=10000):
        """Play *n_iterations* full games from square 0 following *strategy*
        and return the empirical mean number of turns to reach the last square.

        NOTE(review): the game-loop header (k = 0; while not finished;
        action = strategy[k]) was missing from the corrupted source and has
        been reconstructed from the surviving loop body — confirm.
        """
        transition_matrices = [self.safe_dice, self.normal_dice, self.risky_dice]
        number_turns = []
        for _ in range(n_iterations):
            total_turns = 0
            k = 0  # current square
            while k < len(self.layout) - 1:
                action = strategy[k]  # 1 = safe, 2 = normal, 3 = risky
                transition_matrix = transition_matrices[int(action) - 1]
                flattened_probs = transition_matrix[k]
                # Renormalize to guard against rounding drift in the row.
                flattened_probs = flattened_probs / np.sum(flattened_probs)
                k = np.random.choice(len(self.layout), p=flattened_probs)
                # Trap squares (code 3): the normal die loses an extra turn
                # half the time, the risky die always loses an extra turn.
                if self.layout[k] == 3 and action == 2:
                    total_turns += 1 if np.random.uniform(0, 1) < 0.5 else 2
                elif self.layout[k] == 3 and action == 3:
                    total_turns += 2
                else:
                    total_turns += 1
            number_turns.append(total_turns)
        return np.mean(number_turns)

    def simulate_state(self, strategy, layout, circle, n_iterations=10000):
        """Return the empirical mean number of turns to finish from EACH
        starting state (array of length ``len(layout) - 1``), averaged over
        *n_iterations* simulated games per state.

        NOTE(review): the inner game loop (k = state; while not finished;
        action = strategy[k]) was missing from the corrupted source and has
        been reconstructed to match ``simulate_game`` — confirm.
        """
        safe_dice = self.tmc_instance._compute_safe_matrix()
        normal_dice = self.tmc_instance._compute_normal_matrix(layout, circle)[0]
        risky_dice = self.tmc_instance._compute_risky_matrix(layout, circle)[0]
        transition_matrices = [safe_dice, normal_dice, risky_dice]
        number_mean = []
        for _ in range(n_iterations):
            number_turns = []
            for state in range(len(layout) - 1):
                total_turns = 0
                k = state  # start the game from this square
                while k < len(layout) - 1:
                    action = strategy[k]
                    transition_matrix = transition_matrices[int(action) - 1]
                    flattened_probs = transition_matrix[k]
                    flattened_probs = flattened_probs / np.sum(flattened_probs)
                    k = np.random.choice(len(layout), p=flattened_probs)
                    if layout[k] == 3 and action == 2:
                        total_turns += 1 if np.random.uniform(0, 1) < 0.5 else 2
                    elif layout[k] == 3 and action == 3:
                        total_turns += 2
                    else:
                        total_turns += 1
                number_turns.append(total_turns)
            number_mean.append(number_turns)
        # Average over iterations: one mean value per starting state.
        mean_turns = np.mean(number_mean, axis=0)
        return mean_turns

    def play_optimal_policy(self, n_iterations=10000):
        """Mean turns per game when following the value-iteration policy."""
        return self.simulate_game(self.optimal_policy, n_iterations)

    def play_dice_strategy(self, dice_choice, n_iterations=10000):
        """Mean turns per game using one die everywhere.

        dice_choice : 'SafeDice', 'NormalDice' or 'RiskyDice'.
        Raises ValueError for any other value.
        """
        strategy = {
            'SafeDice': self.safe_strategy,
            'NormalDice': self.normal_strategy,
            'RiskyDice': self.risky_strategy,
        }.get(dice_choice)
        if strategy is None:
            raise ValueError("Invalid dice choice")
        return self.simulate_game(strategy, n_iterations)

    def play_random_strategy(self, n_iterations=10000):
        """Mean turns per game using the fixed random die assignment."""
        return self.simulate_game(self.random_strategy, n_iterations)

    def play_empirical_strategy(self):
        """Play ONE game from square 0 under the optimal policy and return
        the number of turns it took.

        NOTE(review): only the loop body of this method survived in the
        corrupted source (its ``def`` line and action selection were lost);
        reconstructed from ``simulate_game`` — confirm.
        """
        transition_matrices = [self.safe_dice, self.normal_dice, self.risky_dice]
        k = 0
        total_turns = 0
        while k < len(self.layout) - 1:
            action = self.optimal_policy[k]
            flattened_probs = transition_matrices[int(action) - 1][k]
            flattened_probs = flattened_probs / np.sum(flattened_probs)
            k = np.random.choice(len(self.layout), p=flattened_probs)
            if self.layout[k] == 3 and action == 2:
                total_turns += 1 if np.random.uniform(0, 1) < 0.5 else 2
            elif self.layout[k] == 3 and action == 3:
                total_turns += 2
            else:
                total_turns += 1
        return total_turns

    def compare_empirical_vs_value_iteration(self, num_games=10000):
        """Per-state expected turns: value iteration vs. empirical simulation
        of the optimal policy, as plain lists keyed by method name."""
        value_iteration_turns = self.expec
        empirical_turns = self.simulate_state(
            self.optimal_policy, self.layout, self.circle, n_iterations=num_games
        )
        mean_turns_by_state = {
            'ValueIteration': value_iteration_turns.tolist(),
            'Empirical': empirical_turns.tolist(),
        }
        return mean_turns_by_state

    def compare_state_based_turns(self, num_games=10000):
        """Per-state turn counts (arrays) for value iteration vs. simulation.

        NOTE(review): in the corrupted source this method computed these
        values, discarded them, and returned whole-game strategy costs
        instead; the strategy comparison now lives in ``compare_strategies``.
        """
        return {
            'ValueIteration': self.expec,
            'Empirical': self.simulate_state(
                self.optimal_policy, self.layout, self.circle, n_iterations=num_games
            ),
        }

    def compare_strategies(self, num_games=10000):
        """Whole-game mean cost for every strategy (used by the demo script)."""
        return {
            'Optimal': self.simulate_game(self.optimal_policy, n_iterations=num_games),
            'SafeDice': self.simulate_game(self.safe_strategy, n_iterations=num_games),
            'NormalDice': self.simulate_game(self.normal_strategy, n_iterations=num_games),
            'RiskyDice': self.simulate_game(self.risky_strategy, n_iterations=num_games),
            'Random': self.simulate_game(self.random_strategy, n_iterations=num_games),
        }
# --- Demo / validation script -------------------------------------------
# Board: code 3 squares are traps (they incur extra turn costs in the
# Validation cost table); the meaning of codes 1 and 2 is not visible
# here — TODO confirm against the layout documentation.
layout = [0, 0, 3, 0, 2, 0, 2, 0, 0, 0, 3, 0, 0, 1, 0]
circle = False

# BUG FIX: the original script used `validation_instance` without ever
# constructing it; instantiate it before first use.
validation_instance = Validation(layout, circle)

# Comparaison entre la stratégie empirique et la value iteration
turns_by_state = validation_instance.compare_empirical_vs_value_iteration(num_games=1000000)
num_states = len(layout)
for state in range(num_states - 1):
    print(f"État {state}:")
    print(f" ValueIteration - Tours moyens : {turns_by_state['ValueIteration'][state]:.2f}")
    print(f" Empirical - Tours moyens : {turns_by_state['Empirical'][state]:.2f}")

# One single game played with the optimal policy.
empirical_strategy_result = validation_instance.play_empirical_strategy()
print("Coût de la stratégie empirique sur un tour :", empirical_strategy_result)

# Comparaison entre la stratégie empirique et la value iteration sur plusieurs jeux
comparison_result = validation_instance.compare_empirical_vs_value_iteration(num_games=1000000)
print("Coût moyen de la stratégie de value iteration :", comparison_result['ValueIteration'])
print("Coût moyen de la stratégie empirique :", comparison_result['Empirical'])

# Coûts des différentes stratégies.  The original computed several of these
# without printing them; prints added so every computed cost is reported.
optimal_cost = validation_instance.play_optimal_policy(n_iterations=1000000)
print("Optimal Strategy Cost:", optimal_cost)
dice1_cost = validation_instance.play_dice_strategy('SafeDice', n_iterations=1000000)
print("Safe Dice Strategy Cost:", dice1_cost)
dice2_cost = validation_instance.play_dice_strategy('NormalDice', n_iterations=1000000)
print("Normal Dice Strategy Cost:", dice2_cost)
dice3_cost = validation_instance.play_dice_strategy('RiskyDice', n_iterations=1000000)
print("Risky Dice Strategy Cost:", dice3_cost)
random_cost = validation_instance.play_random_strategy(n_iterations=1000000)
print("Random Strategy Cost:", random_cost)

# Comparaison entre les stratégies
strategy_comparison = validation_instance.compare_strategies(num_games=1000000)
print("Strategy Comparison Results:", strategy_comparison)

# Per-state mean turns for each strategy.
optimal_policy = validation_instance.optimal_policy
mean_turns_optimal = validation_instance.simulate_state(optimal_policy, layout, circle, n_iterations=1000000)
print("Mean Turns for Optimal Strategy:", mean_turns_optimal)
safe_dice_strategy = validation_instance.safe_strategy
mean_turns_safe_dice = validation_instance.simulate_state(safe_dice_strategy, layout, circle, n_iterations=1000000)
print("Mean Turns for Safe Dice Strategy:", mean_turns_safe_dice)
normal_dice_strategy = validation_instance.normal_strategy
mean_turns_normal_dice = validation_instance.simulate_state(normal_dice_strategy, layout, circle, n_iterations=1000000)
print("Mean Turns for Normal Dice Strategy:", mean_turns_normal_dice)
risky_dice_strategy = validation_instance.risky_strategy
mean_turns_risky_dice = validation_instance.simulate_state(risky_dice_strategy, layout, circle, n_iterations=1000000)
print("Mean Turns for Risky Dice Strategy:", mean_turns_risky_dice)
random_dice_strategy = validation_instance.random_strategy
mean_turns_random_dice = validation_instance.simulate_state(random_dice_strategy, layout, circle, n_iterations=1000000)
print("Mean Turns for Random Dice Strategy:", mean_turns_random_dice)