diff --git a/config/config_sweep_param.py b/config/config_sweep_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f6aabad45afea6e4b04edb78514784d527886b9
--- /dev/null
+++ b/config/config_sweep_param.py
@@ -0,0 +1,133 @@
+
+########################################
+### Dataset & Neural net information ###
+########################################
+# // Dataset //
+config_path = "config_cim_cnn_param"
+dataset_name = "MNIST";
+dim=28
+channels=1
+classes=10
+# // Network structure //
+network_type = "full-qnn";
+# network_struct = "1C1D"
+network_struct = "MLP_three_stage_abn"
+OP_TYPE = "FC";
+C_IN_VEC = [1024,128];
+C_OUT_VEC = [128,64];
+Nl_fp = 1;
+
+# // Conv. kernel size //
+kern_size = 3
+# // Regularization //
+kernel_regularizer=0.
+activity_regularizer=0.
+# // Training iterations & savings //
+Niter = 1;
+Nimg_save = 128;
+
+#####################################
+########## Hyperparameters ##########
+#####################################
+# Main hyper-params
+epochs = 3
+batch_size = 128*10
+# batch_size = 128
+lr = 0.005
+decay = 0.000025
+# Decay & lr factors
+decay_at_epoch = [15, 75, 150 ]
+factor_at_epoch = [.25, .25, .1]
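+# (at each epoch listed in decay_at_epoch, the scheduler in train_param.py
+#  corrects the LR for the accumulated optimizer decay and scales it by the
+#  matching factor_at_epoch entry; note these milestones are never reached
+#  with the default epochs = 3)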
+kernel_lr_multiplier = 10
+# Debug and logging
+progress_logging = 1 # can be 0 = no std logging, 1 = progress bar logging, 2 = one log line per epoch
+# finetune can be False or True
+finetune = False
+
+########################################
+######### Hardware information #########
+########################################
+# ARCHITECTURE-RELATED PARAMETERS
+cim_type = 'charge';
+arch = '10TC';
+tech = 'GF22nmFDX';
+typeT = 'RVT';
+# SUPPLY and BACK-BIAS
+VDD  = 0.8;
+Vmax_beta = 0.1;
+BBN  = 0;
+BBP  = 0;
+# CIM-SRAM I/O RESOLUTION
+IAres = 4;
+Wres  = 1;
+OAres = IAres;
+# ABN resolution (if enabled)
+r_gamma = 5;
+r_beta  = 8;
+# MAXIMUM INPUT VECTOR SIZE for ALL layers
+Nrows = 1152;
+Ncols = 512;
+
+#######################################################################
+######### Sweep vectors (comment out the corresponding HW params above!) #########
+#######################################################################
+IAres_vec = [1,2,4];
+r_gamma_vec = [1,8];
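+# train_param.py sweeps the Cartesian product of these vectors, i.e.
+# len(r_gamma_vec) x len(IAres_vec) = 2 x 3 = 6 training runs, with OAres
+# tracking IAres in each run.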
+
+########################################
+########## Simulation flags ############
+########################################
+# Simulator (techno-dependent)
+simulator = "spectre"
+# Enable noisy training
+EN_NOISE = 0;
+# Enable analog BN
+ANALOG_BN = 1;
+# Embedded ABN
+IS_EMBEDDED = 0;
+# Ideal or effective ABN HW model
+IDEAL_ABN = 1;
+# ABN model includes ADC behaviour
+ABN_INC_ADC = 0;
+# Use post-layout models instead of pre-layout versions
+FLAG_PL = 0;
+# Enable saving
+SAVE_EN = 1;
+# Is first layer FC (depends on network_struct)
+IS_FL_MLP = (OP_TYPE == "FC");
+
+#######################################
+############ Output file ##############
+#######################################
+# Model file
+path_to_model = "./saved_models/";
+model_template = "models/model_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}ABN_{}noise";
+# Training output files
+path_to_out = "./saved_models/";
+acc_file_template = "accuracy/acc_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise.txt";
+w_file_template = "weights/weights_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise.hdf5";
+in_file_template = "inputs/in_IMC_{}_IA{}b.txt";
+out_file_template = "outputs/out_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise";
+inference_file_template = "outputs/inference_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise.txt";
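+# Example: with the defaults above (MNIST, IA4b/W1b/OA4b, r_gamma=5, r_beta=8,
+# Niter=1, analog BN on, noise off), acc_file_template expands to
+#   accuracy/acc_IMC_MNIST_MLP_three_stage_abn_IA4bW1bOA4b_5b8bABN_1iter_1ABN_0noise.txt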
+
+# On-chip inference files
+path_to_chip = "./chip_files/";
+chip_in_template  = "inputs/in_calcim_{}_{}_IA{}b.txt";
+chip_out_template = "outputs/out_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}";
+chip_inference_template = "outputs/inference_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}.txt";
+chip_w_template     = "weights/weights_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}";
+chip_gamma_template = "abn/gamma_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}";
+chip_beta_template  = "abn/beta_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}";
+chip_w_FP_template      = "fp_weights/weights_fp_{}_{}_IA{}bW{}bOA{}b_noise{}";
+chip_gamma_FP_template  = "fp_bn/gamma_fp_{}_{}_IA{}bW{}bOA{}b_noise{}";
+chip_beta_FP_template   = "fp_bn/beta_fp_{}_{}_IA{}bW{}bOA{}b_noise{}";
+
+fS_beta_fp = 128;
+fS_gamma_fp = 64;
+
+# // CPU-only training //
+cpu = True
+# // Dummy values required at create time //
+out_wght_path = None;
+tensorboard_name = None;
+
diff --git a/train_cim_qnn.py b/train_cim_qnn.py
index ef04446795241c711bb03dbe97e54d74522ab8b3..8b531aa82c289c94bd97b34ae7c3d6180f212f38 100644
--- a/train_cim_qnn.py
+++ b/train_cim_qnn.py
@@ -249,6 +249,8 @@ def train_eval_model(data_files,model,precisions,input_data,Niter,SAVE_EN):
           
     return;
 
+########################## MAIN FUNCTION #############################
+
 ########################## IN/OUT FILES #############################
 # Fill output files name in and concat
 acc_file  = path_to_out+acc_file_template.format(dataset_name,network_struct,IAres,Wres,OAres,r_gamma,r_beta,Niter,ANALOG_BN,EN_NOISE);
diff --git a/train_param.py b/train_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3384608a93d57cc51aa6b0d677fad1501349235
--- /dev/null
+++ b/train_param.py
@@ -0,0 +1,282 @@
+from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, LearningRateScheduler
+from tensorflow.keras.optimizers import SGD, Adam
+from keras.losses import squared_hinge, categorical_crossentropy
+from keras.models import Model
+import tensorflow as tf
+import keras.backend as K
+import numpy as np
+
+from models.model_IMC import build_model, load_weights
+from utils.config_utils import Config
+from utils.load_data import load_dataset
+
+from utils.config_hardware_model import SramInfo_current, SramInfo_charge
+from config.config_sweep_param import *
+
+# // Override configuration //
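+# (placeholder for command-line overrides: entries of the form "key=value"
+#  would be parsed below into a dict handed to Config; empty by default)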
+override = {}
+override_dir = {}
+
+for s in override:
+    s_s = s.split("=")
+    k = s_s[0].strip()
+    v = "=".join(s_s[1:]).strip()
+    override_dir[k]=v
+override = override_dir
+# Create config object
+cf = Config(config_path,cmd_args = override)
+
+# // SramInfo class selection
+if(cim_type == 'current'):
+  SramInfo = SramInfo_current;
+elif(cim_type == 'charge'):
+  SramInfo = SramInfo_charge;
+else:
+  raise NameError('Selected CIM type not supported!');
+
+############################ INTERNAL FUNCTIONS ##############################
+
+### Generate model ###
+def generate_model(data_files,cf,network_struct,sramInfo,FLAGS):
+    # Retrieve output files
+    SAVE_EN = FLAGS[0];
+    EN_NOISE = FLAGS[1];
+    ANALOG_BN = FLAGS[2];
+    IS_FL_MLP = FLAGS[3];
+    IDEAL_ABN = FLAGS[4]
+    ABN_INC_ADC = FLAGS[5];
+    FLAG_PL = FLAGS[6]
+    # Weights file
+    w_file = data_files[1];
+    # Retrieve resolution(s)
+    IAres = sramInfo.IAres;
+    Wres  = sramInfo.Wres;
+    OAres = sramInfo.OAres;
+    # Construct the network
+    print('Construct the Network(s)\n')
+        
+    # // Create ideal model //
+    model = build_model(cf,network_struct,sramInfo,EN_NOISE,[FLAG_PL,not(ANALOG_BN),IDEAL_ABN,ABN_INC_ADC])
+    
+    print('Loading data\n')
+    train_data, val_data = load_dataset(cf.dataset_name)
+
+    if(IS_FL_MLP):
+        x_train = train_data[0].reshape(train_data[0].shape[0],train_data[0].shape[1]*train_data[0].shape[2])
+        x_test = val_data[0].reshape(val_data[0].shape[0],val_data[0].shape[1]*val_data[0].shape[2])
+        train_data = (x_train,train_data[1])
+        val_data = (x_test,val_data[1])
+    
+    # learning rate schedule
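+    # (same stepped schedule as described in the config: at each epoch in
+    #  cf.decay_at_epoch, correct the LR for the accumulated decay, then scale
+    #  it by the matching cf.factor_at_epoch entry)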
+    def scheduler(epoch):
+        if epoch in cf.decay_at_epoch:
+            index = cf.decay_at_epoch.index(epoch)
+            factor = cf.factor_at_epoch[index]
+            lr = K.get_value(model.optimizer.lr)
+            IT = train_data[0].shape[0]/cf.batch_size
+            current_lr = lr * (1./(1.+cf.decay*epoch*IT))
+            K.set_value(model.optimizer.lr,current_lr*factor)
+            print('\nEpoch {} updates LR: LR = LR * {} = {}\n'.format(epoch+1,factor, K.get_value(model.optimizer.lr)))
+        return K.get_value(model.optimizer.lr)
+            
+    lr_decay = LearningRateScheduler(scheduler)
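+    # NB: this lr_decay is neither used nor returned; train_eval_model
+    # re-creates its own LearningRateScheduler before fitting.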
+
+
+    #sgd = SGD(lr=cf.lr, decay=cf.decay, momentum=0.9, nesterov=True)
+    adam= Adam(lr=cf.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=cf.decay)
+
+    # Perform training and validation on ideal model
+    print('Compiling the network\n')
+    model.compile(loss=categorical_crossentropy,optimizer=adam,metrics=['accuracy'])
+    if cf.finetune:
+        print('Load previous weights\n')
+        model.load_weights(w_file)
+    else:
+        print('No weights preloaded, training from scratch selected\n')
+    # Return created model
+    return model;
+    
+### Numpy input quantization ###
+def quant_input(x,IAres):
+    # Quantize inputs in [-1,1] onto the 2^IAres integer levels [0, 2^IAres-1]
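+    # (a worked example: with IAres=4, m=16, so x=-1 maps to 0, x=0 to 8,
+    #  and x=1 to floor(16) clipped down to 15)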
+    m = pow(2,IAres);
+    y = m*(x+1)/2;
+    return K.clip(np.floor(y),0,m-1);
+    
+### Input pre-processing ###
+def process_input(dataset,IS_FL_MLP,precisions):
+    # Get input resolution
+    IAres = precisions[0];
+    # Get training and testing sets
+    train_data, test_data = load_dataset(dataset)
+    # Reshape for first layer FC
+    if(IS_FL_MLP):
+        x_train = train_data[0].reshape(train_data[0].shape[0],train_data[0].shape[1]*train_data[0].shape[2])
+        x_test = test_data[0].reshape(test_data[0].shape[0],test_data[0].shape[1]*test_data[0].shape[2])
+        train_data = (x_train,train_data[1])
+        test_data = (x_test,test_data[1])
+    # Quantize inputs    
+    x_train = quant_input(train_data[0],IAres);
+    x_test = quant_input(test_data[0],IAres);
+    train_data = (x_train,train_data[1])
+    test_data = (x_test,test_data[1])
+    return(train_data,test_data);
+
+### Train and evaluate model ###
+def train_eval_model(data_files,model,precisions,input_data,Niter,SAVE_EN):
+    # // Local variables //
+    # Retrieve resolution(s)
+    IAres = precisions[0]
+    Wres  = precisions[1]
+    OAres = precisions[2]
+    # Retrieve inputs
+    train_data = input_data[0];
+    test_data  = input_data[1];
+    # Retrieve output files
+    acc_file = data_files[0];
+    w_file   = data_files[1];
+    in_file  = data_files[2];
+    out_file = data_files[3];
+    inference_file = data_files[4];
+    
+    # // Iterative training //
+    # BN weights storage
+    weightsTensorVec = [];
+    # Average over multiple training runs
+    acc_iter = []; acc_max = 0;
+    best_model = None; best_weights = None;
+    for s in range(Niter):
+        # // Create callbacks //
+        print('Setting up the network and creating callbacks\n')
+        early_stop  = EarlyStopping(monitor='loss', min_delta=0.001, patience=10, mode='min', verbose=1)
+        checkpoint  = ModelCheckpoint(w_file, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max', save_freq='epoch')
+        tensorboard = TensorBoard(log_dir='./logs/' + dataset_name+network_type+network_struct, histogram_freq=0, write_graph=True, write_images=False)
+
+        # // Learning rate schedule //
+        def scheduler(epoch):
+            if epoch in decay_at_epoch:
+                index = decay_at_epoch.index(epoch)
+                factor = factor_at_epoch[index]
+                lr = K.get_value(model.optimizer.lr)
+                IT = train_data[0].shape[0]/batch_size
+                current_lr = lr * (1./(1.+decay*epoch*IT))
+                K.set_value(model.optimizer.lr,current_lr*factor)
+                print('\nEpoch {} updates LR: LR = LR * {} = {}\n'.format(epoch+1,factor, K.get_value(model.optimizer.lr)))
+            return K.get_value(model.optimizer.lr)
+        # Create LR scheduler using this custom scheduling
+        lr_decay = LearningRateScheduler(scheduler)
+
+        # // Train the model //
+        print('### Training the network ###\n')
+        history = model.fit(train_data[0],train_data[1],
+                    batch_size = batch_size,
+                    epochs = epochs,
+                    verbose = progress_logging,
+                    callbacks = [checkpoint, tensorboard,lr_decay],
+                    validation_split = 0.15,
+                    workers = 4,
+                    use_multiprocessing = True
+                    );              
+         
+        # Test model
+        print('### Training done! Evaluating on test data... ###')
+        history_eval = model.evaluate(test_data[0],test_data[1],
+                        batch_size = batch_size,
+                        verbose = progress_logging
+                        )
+        # Get model weights and retrieve those of BN layers
+        weights_temp = model.get_weights();
+        weightsTensorVec.append(weights_temp);
+        
+        # Get outputs of each layer (collected here but not used further;
+        # the saved outputs below are regenerated from the best model)
+        Nlayers = len(model.layers);
+        data_out = [];
+        for i in range(Nlayers):
+            partial_model = Model(model.input,model.layers[i].output);
+            data_out.append(partial_model(test_data[0],training=False));
+            
+        # Print accuracy for this iteration
+        acc_train = history.history['accuracy'][-1]
+        acc_val = history.history['val_accuracy'][-1]
+        acc_test = history_eval[-1]
+        print('Evaluate done for iter #'+str(s))
+        print(f'Training/Validation/Test accuracy: {100*acc_train:.2f}%/{100*acc_val:.2f}%/{100*acc_test:.2f}%')
+        # Store accuracy
+        acc_iter.append(np.array([acc_train,acc_val,acc_test]));
+        # Keep best model/weights to extract outputs from (the model object is
+        # retrained in place across iterations, so snapshot its weights too)
+        if(acc_test > acc_max):
+            acc_max = acc_test;
+            best_model = model;
+            best_weights = weights_temp;
+    # Compute the accuracy mean over the iterations to obtain a robust result
+    acc_iter = np.stack(acc_iter,axis=1);
+    acc_mean = np.sum(acc_iter,axis=1)/Niter
+    print('###################################################################')
+    print('IMC model - average {}b-IA, {}b-W, {}b-OA test accuracy: {:.5f}'.format(IAres,Wres,OAres,acc_mean[2]))
+    print('###################################################################')
+    
+    # // Save results from best model across all iterations //
+    if(SAVE_EN):
+        fileID = open(acc_file,'w')
+        fileID.write("acc_train,acc_val,acc_test\n");
+        for i in range(Niter):
+            fileID.write("{:.5f},{:.5f},{:.5f}\n".format(acc_iter[0,i],acc_iter[1,i],acc_iter[2,i]));
+        fileID.write("{:.5f},{:.5f},{:.5f}\n".format(acc_mean[0],acc_mean[1],acc_mean[2]));
+        fileID.close()
+        # Save inputs
+        with open(in_file,"w") as f:
+          np.savetxt(f,np.reshape(test_data[0][0:Nimg_save],(-1,1)),fmt='%d');
+        # Save outputs (restore the best weights first: best_model aliases the
+        # live model, which holds the weights of the last iteration)
+        best_model.set_weights(best_weights);
+        Nlayers = len(best_model.layers); indL = 0;
+        for i in range(Nlayers):
+          # Get desired layer outputs
+          partial_model = Model(best_model.input,best_model.layers[i].output);
+          data_out = partial_model(test_data[0][0:Nimg_save],training=False);
+          # Write outputs to file, if ADC output only
+          #if(i==6 or i==7 or i==8):
+          #  print(data_out)
+          if(i==2 or i==6 or i==9):
+            out_file_temp = out_file+"_layer_{}.txt".format(indL);
+            indL = indL+1;
+            with open(out_file_temp,"w") as f:
+              np.savetxt(f,np.reshape(data_out,(-1,1)),fmt='%f');
+        # Save inference result
+        with open(inference_file,"w") as f:
+          indResult = np.argmax(test_data[1][0:Nimg_save],axis=-1);
+          np.savetxt(f,np.reshape(indResult,(-1,1)),fmt='%d');
+        # Save weights
+        best_model.save_weights(w_file);
+          
+    return;
+    
+########################## MAIN FUNCTION #############################
+# Update parametric loops based on parameters to sweep
+for r_gamma in r_gamma_vec:
+  for IAres in IAres_vec:
+    OAres = IAres;
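+    # (the loop variables shadow the r_gamma/IAres defaults imported from the
+    #  config file, and OAres follows IAres as in the base configuration)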
+    ########################## IN/OUT FILES #############################
+    # Fill output files name in and concat
+    acc_file  = path_to_out+acc_file_template.format(dataset_name,network_struct,IAres,Wres,OAres,r_gamma,r_beta,Niter,ANALOG_BN,EN_NOISE);
+    w_file    = path_to_out+w_file_template.format(dataset_name,network_struct,IAres,Wres,OAres,r_gamma,r_beta,Niter,ANALOG_BN,EN_NOISE);
+    in_file   = path_to_out+in_file_template.format(dataset_name,IAres);
+    out_file  = path_to_out+out_file_template.format(dataset_name,network_struct,IAres,Wres,OAres,r_gamma,r_beta,Niter,ANALOG_BN,EN_NOISE);
+    inference_file  = path_to_out+inference_file_template.format(dataset_name,network_struct,IAres,Wres,OAres,r_gamma,r_beta,Niter,ANALOG_BN,EN_NOISE);
+    data_files = [acc_file,w_file,in_file,out_file,inference_file];
+    
+    ########################## GENERATE CIM-QNN MODEL #########################
+    # Concatenate flags
+    FLAGS = [SAVE_EN,EN_NOISE,ANALOG_BN,IS_FL_MLP,IDEAL_ABN,ABN_INC_ADC,FLAG_PL];
+    # Generate hardware information
+    sramInfo = SramInfo(arch,tech,typeT,VDD,BBN,BBP,IAres,Wres,OAres,r_gamma,r_beta,Nrows,[IS_EMBEDDED,ABN_INC_ADC]);
+    sramInfo.simulator = simulator;
+    # Create model and check if successful                    
+    model = generate_model(data_files,cf,network_struct,sramInfo,FLAGS);
+    
+    ########################## TRAIN & TEST ON PRE-DEFINED MODEL #########################
+    # Concat precision info
+    precisions = [IAres,Wres,OAres];
+    # // Pre-process input //
+    input_data = process_input(dataset_name,IS_FL_MLP,precisions);
+    # // Train and eval //
+    train_eval_model(data_files,model,precisions,input_data,Niter,SAVE_EN);
+    
\ No newline at end of file