diff --git a/config/config_cim_cnn_param.py b/config/config_cim_cnn_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d4a663e6459d0d9c9401c53b6db0a862405dedc
--- /dev/null
+++ b/config/config_cim_cnn_param.py
@@ -0,0 +1,160 @@
+
+########################################
+### Dataset & Neural net information ###
+########################################
+# // Dataset //
+config_path = "config_qnn_cluster"
+dataset_name = "MNIST"
+dim = 28
+channels = 1
+classes = 10
+# // Network structure //
+network_type = "full-qnn"
+# network_struct = "1C1D"
+network_struct = "MLP_128_64_10"
+OP_TYPE = "FC"
+C_IN_VEC = [1024, 128]
+C_OUT_VEC = [128, 64]
+Nl_fp = 1
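+
+# Sanity-check sketch (illustrative only, not used by the tools): assuming
+# the "MLP_128_64_10" naming convention encodes the layer widths, the
+# hidden widths should match the C_OUT_VEC entries above.
+_mlp_widths = [int(w) for w in network_struct.split("_")[1:]]
+assert _mlp_widths[:len(C_OUT_VEC)] == C_OUT_VEC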
+
+# // Conv. kernel size //
+kern_size = 3
+# // Regularization //
+kernel_regularizer = 0.
+activity_regularizer = 0.
+# // Training iterations & saving //
+Niter = 1
+Nimg_save = 128
+
+#####################################
+########## Hyperparameters ##########
+#####################################
+# Main hyper-params
+epochs = 30
+batch_size = 128 * 10
+# batch_size = 128
+lr = 0.01
+decay = 0.000025
+# Decay & lr factors
+decay_at_epoch = [15, 75, 150]
+factor_at_epoch = [.25, .25, .1]
+kernel_lr_multiplier = 10
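+
+# Worked example of the schedule above (a sketch; the real callback lives in
+# the training code): the learning rate is multiplied by factor_at_epoch[i]
+# once epoch decay_at_epoch[i] is reached, so with lr = 0.01 this gives
+# 0.0025 from epoch 15, 0.000625 from epoch 75 and 0.0000625 from epoch 150.
+def _lr_at(epoch, base_lr=lr):
+    for at, factor in zip(decay_at_epoch, factor_at_epoch):
+        if epoch >= at:
+            base_lr *= factor
+    return base_lr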
+# Debug and logging
+progress_logging = 1  # 0 = no logging, 1 = progress bar, 2 = one log line per epoch
+# finetune can be True or False
+finetune = False
+
+########################################
+######### Hardware information #########
+########################################
+# ARCHITECTURE-RELATED PARAMETERS
+arch = '6T'
+tech = 'GF22nmFDX'
+typeT = 'RVT'
+# SUPPLY and BACK-BIAS
+VDD = 0.8
+Vmax_beta = 0.12
+BBN = 0
+BBP = 0
+# CIM-SRAM I/O RESOLUTION
+IAres = 1
+Wres = 1
+OAres = 32
+# ABN resolution (if enabled)
+r_gamma = 6
+r_beta = 3
+# MAXIMUM ARRAY SIZE for ALL layers (input rows x output columns)
+Nrows = 1152
+Ncols = 512
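+
+# Mapping sketch (an assumption about the row mapping, not the simulator's
+# own code): with one input per row, a layer of fan-in C_IN fits in a single
+# array activation when C_IN <= Nrows, else it is folded over
+# ceil(C_IN / Nrows) partial sums. Here 1024 and 128 both fit in 1152 rows.
+import math
+_n_folds = [math.ceil(c_in / Nrows) for c_in in C_IN_VEC]  # -> [1, 1]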
+
+########################################
+########## Simulation flags ############
+########################################
+# Simulator (techno-dependent)
+simulator = "spectre"
+# Enable noisy training
+EN_NOISE = 0
+# Enable analog BN
+ANALOG_BN = 1
+# Embedded ABN
+IS_EMBEDDED = 0
+# ABN model includes ADC behaviour
+ABN_INC_ADC = 1
+# Enable saving
+SAVE_EN = 1
+# Is first layer FC (depends on network_struct)
+IS_FL_MLP = (OP_TYPE == "FC")
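+
+# Consistency sketch (assumed relationship, not stated in this diff):
+# an embedded ABN presumably requires the analog-BN path to be enabled.
+assert not IS_EMBEDDED or ANALOG_BN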
+
+#######################################
+############ Output file ##############
+#######################################
+# Model file
+path_to_model = "./saved_models/"
+model_template = "models/model_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}ABN_{}noise"
+# Training output files
+path_to_out = "./saved_models/"
+acc_file_template = "accuracy/acc_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise.txt"
+w_file_template = "weights/weights_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise.hdf5"
+in_file_template = "inputs/in_IMC_{}_IA{}b.txt"
+out_file_template = "outputs/out_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise"
+inference_file_template = "outputs/inference_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise.txt"
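+
+# Example of how such a template might be filled (the argument order is an
+# assumption read off the placeholder names, not taken from the repo):
+_example_model_file = path_to_model + model_template.format(
+    network_struct, dataset_name, IAres, Wres, OAres,
+    r_gamma, r_beta, ANALOG_BN, EN_NOISE)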
+
+# On-chip inference files
+path_to_chip = "./chip_files/";
+chip_in_template = "inputs/in_calcim_{}_{}_IA{}b.txt"
+chip_out_template = "outputs/out_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}"
+chip_inference_template = "outputs/inference_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}.txt"
+chip_w_template = "weights/weights_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}"
+chip_gamma_template = "abn/gamma_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}"
+chip_beta_template = "abn/beta_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}"
+chip_w_FP_template = "fp_weights/weights_fp_{}_{}_IA{}bW{}bOA{}b_noise{}"
+chip_gamma_FP_template = "fp_bn/gamma_fp_{}_{}_IA{}bW{}bOA{}b_noise{}"
+chip_beta_FP_template = "fp_bn/beta_fp_{}_{}_IA{}bW{}bOA{}b_noise{}"
+
+# Full-scale factors for the full-precision ABN parameters (inferred from the fS_* naming)
+fS_beta_fp = 128
+fS_gamma_fp = 64
+
+# // CPU-only training //
+cpu = True
+# // Dummy values required at create time //
+out_wght_path = None
+tensorboard_name = None
+
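+# Consumption sketch (hypothetical caller, not part of this diff): training
+# scripts would typically import this module and read its flat attributes:
+#   import config.config_cim_cnn_param as cf
+#   print(cf.network_struct, cf.IAres, cf.Wres, cf.OAres)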