########################################
### Dataset & Neural net information ###
########################################
# // Dataset //
config_path = "config_qnn_cluster"
dataset_name = "MNIST"
dim = 28
channels = 1
classes = 10
# // Network structure //
network_type = "full-qnn"
# network_struct = "1C1D"
network_struct = "MLP_128_64_10"
OP_TYPE = "FC"
C_IN_VEC = [1024, 128]
C_OUT_VEC = [128, 64]
Nl_fp = 1
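# Illustrative sketch (not part of the original config): the "MLP_128_64_10"
# string above presumably encodes the widths of the fully-connected layers.
# A parser along these lines could recover them; parse_mlp_struct is a
# hypothetical helper name, not one used by the training scripts.
def parse_mlp_struct(struct):
    # "MLP_128_64_10" -> [128, 64, 10]
    tokens = struct.split("_")
    assert tokens[0] == "MLP", "only MLP-style strings are handled here"
    return [int(t) for t in tokens[1:]]
# Example: parse_mlp_struct(network_struct) -> [128, 64, 10]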
# // Conv. kernel size //
kern_size = 3
# // Regularization //
kernel_regularizer = 0.
activity_regularizer = 0.
# // Training iterations & savings //
Niter = 1
Nimg_save = 128
#####################################
########## Hyperparameters ##########
#####################################
# Main hyper-params
epochs = 30
batch_size = 128 * 10
# batch_size = 128
lr = 0.01
decay = 0.000025
# Decay & lr factors
decay_at_epoch = [15, 75, 150]
factor_at_epoch = [.25, .25, .1]
kernel_lr_multiplier = 10
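# Illustrative sketch (assumption): one plausible way the lists above are
# consumed is an epoch-indexed schedule that multiplies the base lr by
# factor_at_epoch[i] once decay_at_epoch[i] is reached. How the training
# script actually applies them is not visible from this file.
def scheduled_lr(epoch):
    new_lr = lr
    for e, f in zip(decay_at_epoch, factor_at_epoch):
        if epoch >= e:
            new_lr *= f
    return new_lr
# e.g. (hypothetical) keras.callbacks.LearningRateScheduler(scheduled_lr)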
# Debug and logging
progress_logging = 1  # 0 = no stdout logging, 1 = progress bar, 2 = one log line per epoch
# finetune can be False or True
finetune = False
########################################
######### Hardware information #########
########################################
# ARCHITECTURE-RELATED PARAMETERS
arch = '6T'
tech = 'GF22nmFDX'
typeT = 'RVT'
# SUPPLY and BACK-BIAS
VDD = 0.8
Vmax_beta = 0.12
BBN = 0
BBP = 0
# CIM-SRAM I/O RESOLUTION
IAres = 1
Wres = 1
OAres = 32
# ABN resolution (if enabled)
r_gamma = 6
r_beta = 3
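# Illustrative sketch (assumption): with r_gamma / r_beta bits available,
# each ABN coefficient would be mapped to one of 2**r uniform levels. This
# is a generic uniform quantizer, not necessarily the simulator's model.
def quantize_uniform(x, n_bits, x_max):
    step = 2.0 * x_max / (2 ** n_bits)  # LSB size for a symmetric range
    q = round(x / step) * step          # snap to the nearest level
    return max(-x_max, min(x_max, q))   # clip to the representable range
# Example: quantize_uniform(0.07, r_beta, Vmax_beta)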
# MAXIMUM INPUT VECTOR SIZE for ALL layers
Nrows = 1152
Ncols = 512
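# Illustrative sanity check (assumption): each FC layer's input vector must
# fit in the array rows and its output vector in the array columns. With the
# values above (1024 <= 1152, 128 <= 512, ...) these asserts pass.
for _c_in, _c_out in zip(C_IN_VEC, C_OUT_VEC):
    assert _c_in <= Nrows, "layer input %d exceeds Nrows = %d" % (_c_in, Nrows)
    assert _c_out <= Ncols, "layer output %d exceeds Ncols = %d" % (_c_out, Ncols)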
########################################
########## Simulation flags ############
########################################
# Simulator (techno-dependent)
simulator = "spectre"
# Enable noisy training
EN_NOISE = 0
# Enable analog BN
ANALOG_BN = 1
# Embedded ABN
IS_EMBEDDED = 0
# ABN model includes ADC behaviour
ABN_INC_ADC = 1
# Enable saving
SAVE_EN = 1
# Is first layer FC (depends on network_struct)
IS_FL_MLP = (OP_TYPE == "FC")
#######################################
############ Output file ##############
#######################################
# Model file
path_to_model = "./saved_models/"
model_template = "models/model_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}ABN_{}noise"
# Training output files
path_to_out = "./saved_models/"
acc_file_template = "accuracy/acc_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise.txt"
w_file_template = "weights/weights_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise.hdf5"
in_file_template = "inputs/in_IMC_{}_IA{}b.txt"
out_file_template = "outputs/out_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise"
inference_file_template = "outputs/inference_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise.txt"
# On-chip inference files
path_to_chip = "./chip_files/"
chip_in_template = "inputs/in_calcim_{}_{}_IA{}b.txt"
chip_out_template = "outputs/out_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}"
chip_inference_template = "outputs/inference_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}.txt"
chip_w_template = "weights/weights_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}"
chip_gamma_template = "abn/gamma_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}"
chip_beta_template = "abn/beta_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}"
chip_w_FP_template = "fp_weights/weights_fp_{}_{}_IA{}bW{}bOA{}b_noise{}"
chip_gamma_FP_template = "fp_bn/gamma_fp_{}_{}_IA{}bW{}bOA{}b_noise{}"
chip_beta_FP_template = "fp_bn/beta_fp_{}_{}_IA{}bW{}bOA{}b_noise{}"
fS_beta_fp = 128
fS_gamma_fp = 64
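# Illustrative sketch (assumption): the positional "{}" fields in the
# templates above are presumably filled from config values. The argument
# order below is a guess for model_template and may not match the training
# script's actual call.
if __name__ == "__main__":
    print((path_to_model + model_template).format(
        dataset_name, network_struct, IAres, Wres, OAres,
        r_gamma, r_beta, ANALOG_BN, EN_NOISE))
    # -> ./saved_models/models/model_MNIST_MLP_128_64_10_IA1bW1bOA32b_6b3bABN_1ABN_0noise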
# // CPU-only training //
cpu = True
# // Dummy values required at create time //
out_wght_path = None
tensorboard_name = None