# NOTE(review): two stray lines ("Newer"/"Older") — residue from a diff/merge
# viewer, not part of the configuration — were removed here.
########################################
### Dataset & Neural net information ###
########################################
# // Dataset //
config_path = "config_cim_cnn_param"
dataset_name = "MNIST"
dim = 28       # input image height/width (MNIST images are 28x28)
channels = 1   # single-channel (grayscale) input
classes = 10   # number of output classes
# // Network structure //
network_type = "full-qnn"
# network_struct = "1C1D"
network_struct = "MLP_three_stage_abn"
OP_TYPE = "FC"
# Per-FC-layer input/output vector sizes (one entry per layer)
C_IN_VEC = [1024, 128]
C_OUT_VEC = [128, 64]
# NOTE(review): presumably the number of full-precision layers — confirm
Nl_fp = 1
# // Conv. kernel size //
kern_size = 3
# // Regularization //
kernel_regularizer = 0.0
activity_regularizer = 0.0
# // Training iterations & savings //
Niter = 1
Nimg_save = 128
#####################################
########## Hyperparameters ##########
#####################################
# Main hyper-params
epochs = 30
batch_size = 128 * 10
# batch_size = 128
decay = 0.000025
# Decay & lr factors: at each epoch in decay_at_epoch, the learning rate is
# scaled by the matching entry of factor_at_epoch (lists must stay aligned).
decay_at_epoch = [15, 75, 150]
factor_at_epoch = [0.25, 0.25, 0.1]
kernel_lr_multiplier = 10
# Debug and logging
progress_logging = 1  # can be 0 = no std logging, 1 = progress bar logging, 2 = one log line per epoch
# finetune can be false or true
finetune = False
########################################
######### Hardware information #########
########################################
# ARCHITECTURE-RELATED PARAMETERS
cim_type = "charge"   # compute-in-memory cell type
arch = "10TC"         # bitcell architecture
tech = "GF22nmFDX"    # process technology
typeT = "RVT"         # transistor flavour
# SUPPLY and BACK-BIAS
VDD = 0.8  # supply voltage [V]
# NOTE(review): r_gamma / r_beta appear under "SUPPLY and BACK-BIAS" —
# presumably back-bias-related ratios; confirm units/meaning with the HW model.
r_gamma = 5
r_beta = 5
# MAXIMUM INPUT VECTOR SIZE for ALL layers
Nrows = 1152
Ncols = 512
########################################
########## Simulation flags ############
########################################
# Simulator (techno-dependent)
simulator = "spectre"
# Enable noisy training (0 = off, 1 = on)
EN_NOISE = 0
# Enable analog batch-norm (ABN)
ANALOG_BN = 1
# Embedded ABN
IS_EMBEDDED = 0
# Ideal or effective ABN HW model (1 = ideal)
IDEAL_ABN = 1
# Use post-layout model instead of pre-layout versions
# NOTE(review): the assignment(s) that belonged under this comment were lost —
# the span here contained only bare integers 88..127 (line numbers leaked from
# an extraction/diff tool). Restore the missing flag(s) (e.g. a post-layout
# enable flag) from the original configuration file.
# Enable saving (0 = off, 1 = on)
SAVE_EN = 1
# Is first layer FC (depends on network_struct)
IS_FL_MLP = (OP_TYPE == "FC")
#######################################
############ Output file ##############
#######################################
# All *_template strings are str.format() patterns; the {} placeholders are
# filled in by the training/export scripts (dataset, network, bit-widths, ...).
# Model file
path_to_model = "./saved_models/"
model_template = "models/model_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}ABN_{}noise"
# Training output files
path_to_out = "./saved_models/"
acc_file_template = "accuracy/acc_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise.txt"
w_file_template = "weights/weights_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise.hdf5"
in_file_template = "inputs/in_IMC_{}_IA{}b.txt"
out_file_template = "outputs/out_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise"
inference_file_template = "outputs/inference_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise.txt"
# On-chip inference files
path_to_chip = "./chip_files/"
chip_in_template = "inputs/in_calcim_{}_{}_IA{}b.txt"
chip_out_template = "outputs/out_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}"
chip_inference_template = "outputs/inference_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}.txt"
chip_w_template = "weights/weights_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}"
chip_gamma_template = "abn/gamma_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}"
chip_beta_template = "abn/beta_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}"
chip_w_FP_template = "fp_weights/weights_fp_{}_{}_IA{}bW{}bOA{}b_noise{}"
chip_gamma_FP_template = "fp_bn/gamma_fp_{}_{}_IA{}bW{}bOA{}b_noise{}"
chip_beta_FP_template = "fp_bn/beta_fp_{}_{}_IA{}bW{}bOA{}b_noise{}"
# NOTE(review): presumably fixed-point scaling factors for the FP beta/gamma
# exports — confirm against the export scripts.
fS_beta_fp = 128
fS_gamma_fp = 64
# // CPU-only training //
cpu = True
# // Dummy values required at create time //
# These None placeholders are expected to be overwritten later by the
# training scripts before use.
out_wght_path = None
tensorboard_name = None