diff --git a/LUTs/.gitkeep b/LUTs/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/README.md b/README.md
index 525c13d4c7bf16b56ebaead50c5dadf823c8e6ae..23d877fa3f2f38f8ba834331e7dc8334176bf87a 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,12 @@
 # ModelCIMs_framework
 
-The ModelCIMs framework has been developed for the training of CNNs/DNNs aware of SRAM-based compute in-memory accelerators non-idealities.
\ No newline at end of file
+The ModelCIMs framework has been developed to train CNNs/DNNs that are aware of the non-idealities of SRAM-based compute-in-memory (CIM) accelerators.
+
+--- Copyright/referencing ---
+
+Usage of this framework is subject to copyright policies. Please cite the original research paper
+"A. Kneip et al., A 1-to-4b 16.8-POPS/W 473-TOPS/mm2 6T-based In-Memory Computing SRAM in 22nm FD-SOI with Multi-Bit Analog Batch-Normalization, Proceedings of IEEE ESSCIRC 2022, pp. 1-4" when using it for academic or industrial purposes.
+
+--- Framework usage ---
+
+--> Coming soon
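+
+Pending the official documentation, the sketch below outlines how the pieces in this repository are intended to fit together. It is a hypothetical example only: the construction of the `sramInfo` hardware-description object, the exact `build_model` arguments (notably the model type and the EN_QUANT flag) and the loss/optimizer choices are assumptions that may differ from the final training scripts.
+
+```python
+# Hypothetical usage sketch -- not the official training script.
+from config import config_cim_cnn_param as cf
+from models.model_IMC import build_model
+from utils.load_data import load_dataset
+
+# Load MNIST/CIFAR-10 (targets are one-hot encoded and mapped to +/-1 for hinge loss)
+train_set, valid_set = load_dataset(cf.dataset_name)
+
+# sramInfo describes the CIM-SRAM macro (arch, VDD, IAres/Wres/OAres, Nrows, ...);
+# it is built with helpers from utils/config_hardware_model (construction not shown here).
+sramInfo = ...
+
+# Assumed mapping of the config flags onto build_model(cf, model_type, sramInfo, EN_NOISE, EN_QUANT, ABN_INC_ADC)
+model = build_model(cf, cf.network_struct, sramInfo,
+                    cf.EN_NOISE, not cf.ANALOG_BN, cf.ABN_INC_ADC)
+model.compile(optimizer='adam', loss='squared_hinge', metrics=['accuracy'])
+model.fit(train_set[0], train_set[1],
+          batch_size=cf.batch_size, epochs=cf.epochs,
+          validation_data=valid_set)
+```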
diff --git a/config/.gitkeep b/config/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/config/config_cim_cnn_param.py b/config/config_cim_cnn_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d4a663e6459d0d9c9401c53b6db0a862405dedc
--- /dev/null
+++ b/config/config_cim_cnn_param.py
@@ -0,0 +1,122 @@
+
+########################################
+### Dataset & Neural net information ###
+########################################
+# // Dataset //
+config_path = "config_qnn_cluster"
+dataset_name = "MNIST";
+dim=28
+channels=1
+classes=10
+# // Network structure //
+network_type = "full-qnn";
+# network_struct = "1C1D"
+network_struct = "MLP_128_64_10"
+OP_TYPE = "FC";
+C_IN_VEC = [1024,128];
+C_OUT_VEC = [128,64];
+Nl_fp = 1;
+
+# // Conv. kernel size //
+kern_size = 3
+# // Regularization //
+kernel_regularizer=0.
+activity_regularizer=0.
+# // Training iterations & savings //
+Niter = 1;
+Nimg_save = 128;
+
+#####################################
+########## Hyperparameters ##########
+#####################################
+# Main hyper-params
+epochs = 30
+batch_size = 128*10
+# batch_size = 128
+lr = 0.01
+decay = 0.000025
+# Decay & lr factors
+decay_at_epoch = [15, 75, 150 ]
+factor_at_epoch = [.25, .25, .1]
+kernel_lr_multiplier = 10
+# Debug and logging
+progress_logging = 1 # can be 0 = no std logging, 1 = progress bar logging, 2 = one log line per epoch
+# finetune can be False or True
+finetune = False
+
+########################################
+######### Hardware information #########
+########################################
+# ARCHITECTURE-RELATED PARAMETERS
+arch = '6T';
+tech = 'GF22nmFDX';
+typeT = 'RVT';
+# SUPPLY and BACK-BIAS
+VDD  = 0.8;
+Vmax_beta = 0.12;
+BBN  = 0;
+BBP  = 0;
+# CIM-SRAM I/O RESOLUTION
+IAres = 1;
+Wres  = 1;
+OAres = 32;
+# ABN resolution (if enabled)
+r_gamma = 6;
+r_beta  = 3;
+# MAXIMUM INPUT VECTOR SIZE for ALL layers
+Nrows = 1152;
+Ncols = 512;
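+# Note: layers whose input vector exceeds Nrows are split by the IMC layers across
+# roughly ceil(input_dim/Nrows) parallel macros at build time, and their analog
+# outputs are averaged (e.g. 2048 inputs with Nrows = 1152 -> 2 macros).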
+
+########################################
+########## Simulation flags ############
+########################################
+# Simulator (techno-dependent)
+simulator = "spectre"
+# Enable noisy training
+EN_NOISE = 0;
+# Enable analog BN
+ANALOG_BN = 1;
+# Embedded ABN
+IS_EMBEDDED = 0;
+# ABN model includes ADC behaviour
+ABN_INC_ADC = 1;
+# Enable saving
+SAVE_EN = 1;
+# Is first layer FC (depends on network_struct)
+IS_FL_MLP = (OP_TYPE == "FC");
+
+#######################################
+############ Output file ##############
+#######################################
+# Model file
+path_to_model = "./saved_models/";
+model_template = "models/model_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}ABN_{}noise";
+# Training output files
+path_to_out = "./saved_models/";
+acc_file_template = "accuracy/acc_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise.txt";
+w_file_template = "weights/weights_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise.hdf5";
+in_file_template = "inputs/in_IMC_{}_IA{}b.txt";
+out_file_template = "outputs/out_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise";
+inference_file_template = "outputs/inference_IMC_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}ABN_{}noise.txt";
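+# The '{}' placeholders above are filled by the training/inference scripts; as an
+# illustration only (the exact fill order is defined in those scripts):
+#   in_file_template.format(dataset_name, IAres) -> "inputs/in_IMC_MNIST_IA1b.txt"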
+
+# On-chip inference files
+path_to_chip = "./chip_files/";
+chip_in_template  = "inputs/in_calcim_{}_{}_IA{}b.txt";
+chip_out_template = "outputs/out_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}";
+chip_inference_template = "outputs/inference_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}.txt";
+chip_w_template     = "weights/weights_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}";
+chip_gamma_template = "abn/gamma_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}";
+chip_beta_template  = "abn/beta_calcim_{}_{}_IA{}bW{}bOA{}b_noise{}";
+chip_w_FP_template      = "fp_weights/weights_fp_{}_{}_IA{}bW{}bOA{}b_noise{}";
+chip_gamma_FP_template  = "fp_bn/gamma_fp_{}_{}_IA{}bW{}bOA{}b_noise{}";
+chip_beta_FP_template   = "fp_bn/beta_fp_{}_{}_IA{}bW{}bOA{}b_noise{}";
+
+fS_beta_fp = 128;
+fS_gamma_fp = 64;
+
+# // CPU-only training //
+cpu = True
+# // Dummy values required at create time //
+out_wght_path = None;
+tensorboard_name = None;
+
diff --git a/layers/.gitkeep b/layers/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/layers/binary_layers_IMC.py b/layers/binary_layers_IMC.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bb4f77b3800508803bcd98f550254ef6b46ad95
--- /dev/null
+++ b/layers/binary_layers_IMC.py
@@ -0,0 +1,415 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+import math
+
+from keras import backend as K
+import tensorflow as tf
+
+from keras.layers import InputSpec, Layer, Dense, Conv2D
+from keras import constraints
+from keras import initializers
+# Binarization functions
+from layers.binary_ops import binarize, binarize_exp, binarize_ssb
+from layers.binary_ops import binary_sigmoid_p
+# Analog MAC operator
+from models.MAC_current import MAC_op_se_ana as MAC_op_se
+from models.MAC_current import MAC_op_diff_ana as MAC_op_diff
+from models.CONV_current import CONV_op_se_ana as CONV_op_se
+from models.CONV_current import CONV_op_diff_ana as CONV_op_diff
+# ADC model
+from models.ADC import quant_uni
+# Hardware parameters generation
+from utils.config_hardware_model import genHardware
+# Temporary dir
+import tempfile
+import sys
+import subprocess
+import time
+# Modeling files
+import os
+scriptpath = "../lib_modelcim/"
+sys.path.append(os.path.abspath(scriptpath));
+from preProc_wrapper import preProcSat as getHardwareData
+from fit_spice import DP_fit
+
+class Clip(constraints.Constraint):
+    def __init__(self, min_value, max_value=None):
+        self.min_value = min_value
+        self.max_value = max_value
+        if self.max_value is None:
+            self.max_value = -self.min_value
+        if self.min_value > self.max_value:
+            self.min_value, self.max_value = self.max_value, self.min_value
+
+    def __call__(self, p):
+        return K.clip(p, self.min_value, self.max_value)
+
+    def get_config(self):
+        return {"name": self.__call__.__name__,
+                "min_value": self.min_value,
+                "max_value": self.max_value}
+
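+# Note: Clip is used below as the kernel constraint of the binary layers, keeping
+# the binarized weights within [-H, H] during training.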
+
+class BinaryDense(Dense):
+    ''' Binarized Dense layer
+    References:
+    "BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1" [http://arxiv.org/abs/1602.02830]
+    '''
+    def __init__(self, units, H=1.,sramInfo=None, EN_NOISE=0, EN_QUANT=1, kernel_lr_multiplier='Glorot', bias_lr_multiplier=None, **kwargs):
+        super(BinaryDense, self).__init__(units, **kwargs)
+        self.H = H
+        self.kernel_lr_multiplier = kernel_lr_multiplier
+        self.bias_lr_multiplier = bias_lr_multiplier
+        
+        self.EN_NOISE = EN_NOISE
+        self.EN_QUANT = EN_QUANT
+        
+        self.sramInfo = sramInfo
+        self.hardware = None
+        self.Vt_noise = None
+        self.input_dim = None
+        
+    
+    def build(self, input_shape):
+        assert len(input_shape) >= 2
+        input_dim = input_shape[1]
+        self.input_dim = input_dim;
+
+        if self.H == 'Glorot':
+            self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
+            #print('Glorot H: {}'.format(self.H))
+        if self.kernel_lr_multiplier == 'Glorot':
+            self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
+            #print('Glorot learning rate multiplier: {}'.format(self.kernel_lr_multiplier))
+        
+        # Retrieve architecture type (diff or se) and derive flag
+        archType = self.sramInfo.arch.name;
+       # if(archType == '6T'):
+        self.kernel_constraint = Clip(-self.H, self.H)
+        self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
+       # elif(archType == '8T'):
+            # self.kernel_constraint = Clip(0, self.H)
+            # self.kernel_initializer = initializers.RandomUniform(0, self.H)    
+        # else:
+            # error('Unsupported cell type during binary weights initialization !');
+        
+        self.kernel = self.add_weight(shape=(input_dim, self.units),
+                                     initializer=self.kernel_initializer,
+                                     name='kernel',
+                                     regularizer=self.kernel_regularizer,
+                                     constraint=self.kernel_constraint)
+
+        if self.use_bias:
+            self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
+            self.bias = self.add_weight(shape=(self.units,),
+                                     initializer=self.bias_initializer,
+                                     name='bias',
+                                     regularizer=self.bias_regularizer,
+                                     constraint=self.bias_constraint)
+        else:
+            self.lr_multipliers = [self.kernel_lr_multiplier]
+            self.bias = None
+            
+        # Get DP electrical quantities for this layer
+        Nrows = self.sramInfo.Nrows.data
+        N_cim = int(math.ceil((input_dim-1)/Nrows));
+        self.sramInfo.NB.data = int(input_dim/N_cim);
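+        # Example with the default MNIST MLP config: a first FC layer with input_dim = 1024
+        # and Nrows = 1152 fits in N_cim = 1 macro, with NB = 1024 active cells per dot-product.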
+        print(f'######## FC layer with {self.sramInfo.NB.data} cells/op supplied at {self.sramInfo.VDD.data:.2f}V ######## ')
+        path_dir = '/export/home/adkneip/Documents/PhD/ELDO/IMC_PYTHON/CURRENT_MAC/'+self.sramInfo.arch.name+'_CELL/'
+        ################################################# USE TEMPORARY SIM DIRECTORY #####################################################
+        with tempfile.TemporaryDirectory(dir=path_dir,prefix='SimFolder_') as path_to_file:
+            #print(path_to_file)
+            # Copy .cir files into the temporary simulation folder -- using the '*' symbol fails for some reason
+            if(self.sramInfo.simulator == "eldo"):
+                file_table = np.array(['MAC_DC.cir','MAC_NL.cir','MAC_satCal.cir','MAC_time.cir','MAC_train_MC.cir']);
+            elif(self.sramInfo.simulator == "spectre"):
+                file_table = np.array(['MAC_DC.scs','MAC_satCal.scs','MAC_time.scs',
+                                        'MAC_DC.mdl','MAC_satCal.mdl','MAC_time.mdl']);
+            else:
+                sys.exit('Error: selected simulator not supported !\n');
+            for file_temp in file_table:
+                commandLine = ['cp',path_dir+'RefFolder/'+file_temp,path_to_file+'/'];
+                proc = subprocess.run(commandLine);
+                if(proc.returncode != 0):
+                    sys.exit('Error: could not copy reference files into temporary sim folder !\n');
+            # Create temporary data folder
+            commandLine = ['mkdir',path_to_file+'/data'];
+            proc = subprocess.run(commandLine);
+            if(proc.returncode != 0):
+                sys.exit('Error: could not create data folder in temporary sim folder !\n');
+            # Perform Spice simulations
+            self.sramInfo = getHardwareData(path_to_file,self.sramInfo)
+        #     time.sleep(300); # For debug
+        ###################################################################################################################################
+        # Generate hardware parameters
+        hardware = genHardware(self.sramInfo)
+        # Compute the appropriate curve-fitting factors
+        # hardware.a1 = 1; hardware.a2 = 1; hardware.b1 = 1;
+        # self.hardware = hardware
+        print(f'######## Performing three-parametric best curve-fitting ######## ') 
+        self.hardware = DP_fit(path_dir,'early',hardware)        
+        # Create V_th distribution
+        mu_Vth = self.hardware.mu_Vth
+        sig_Vth = self.hardware.sig_Vth
+        # self.Vt_noise = K.random_normal(shape=(self.units,),mean=0,stddev=sig_Vth)
+        self.Vt_noise = K.random_normal(shape=(self.units,),mean=0,stddev=0)
+              
+        # Perform build
+        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
+        self.built = True
+        
+    def call(self, inputs):
+        # Binarize weights
+        W_bin = binarize(self.kernel, H=self.H);
+        # Check if a single CIM-SRAM is sufficient, or ideal charge-share of their analog outputs
+        Nrows = self.hardware.sramInfo.Nrows.data
+        N_cim = int(math.ceil((self.input_dim-1)/Nrows));
+        # Retrieve architecture type (diff or se) and derive flag
+        archType = self.hardware.sramInfo.arch.name;
+        IS_SE_OUT = (archType == '8T') or  self.EN_QUANT;
+        # Wrap correct MAC_op function
+        if(archType == '6T'):
+            MAC_op = MAC_op_diff;
+        elif(archType == '8T'):
+            MAC_op = MAC_op_se;
+        else:
+            raise NameError('Error: selected architecture (cell type) not supported during FC layer compute !\n');
+        # Emulate 6T-based CIM-SRAM analog MAC operation, possibly with parallel macros
+        if(N_cim > 1):
+            # Separate inputs and weights in sub-matrices
+            inputs = tf.unstack(K.reshape(inputs,(-1,int(self.input_dim/N_cim),N_cim)),axis=-1)
+            W_bin = K.permute_dimensions(K.reshape(K.permute_dimensions(W_bin,(1,0)),(-1,int(self.input_dim/N_cim),N_cim)),(1,2,0))
+            W_bin = tf.unstack(W_bin,axis=1)
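+            # At this point, inputs is a list of N_cim tensors of shape (batch, input_dim/N_cim) and
+            # W_bin a list of N_cim matrices of shape (input_dim/N_cim, units), i.e. one slice per macro.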
+            # Perform CIM-SRAM operations over all sub-matrices (i.e. different CIM-SRAMs)
+            V_DP = [];
+            for i in range(N_cim):
+                V_DP.append(MAC_op(self.hardware,inputs[i],W_bin[i],self.Vt_noise,self.EN_NOISE,self.EN_QUANT))
+            # Combine the result as if ideal charge-sharing (--> could implement actual charge-sharing !)
+            if(IS_SE_OUT):
+                V_DP = K.sum(tf.stack(V_DP,axis=2),axis=2)/N_cim;
+            else:
+                # Combine the per-macro differential outputs, keeping BL and BLB separate
+                V_BL  = K.sum(tf.stack([V[0] for V in V_DP],axis=2),axis=2)/N_cim;
+                V_BLB = K.sum(tf.stack([V[1] for V in V_DP],axis=2),axis=2)/N_cim;
+        else:
+            if(IS_SE_OUT):
+                V_DP = MAC_op(self.hardware,inputs,W_bin,self.Vt_noise,self.EN_NOISE,self.EN_QUANT);
+            else:
+                (V_BL,V_BLB) = MAC_op(self.hardware,inputs,W_bin,self.Vt_noise,self.EN_NOISE,self.EN_QUANT);
+        # Add bias to PA
+        if self.use_bias:
+            if(IS_SE_OUT):
+                V_DP = K.bias_add(V_DP, self.bias)
+            else:
+                V_BL  = K.bias_add(V_BL,self.bias)
+                V_BLB = K.bias_add(V_BLB,self.bias)
+                
+        # Quantify the PA to get the digitized OA
+        IAres = self.hardware.sramInfo.IAres;
+        OAres = self.hardware.sramInfo.OAres;
+        NB = self.hardware.sramInfo.NB.data;
+        PAmax = (2**IAres-1)*NB;
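+        # e.g. IAres = 1 and NB = 1024 active cells give PAmax = (2**1 - 1)*1024 = 1024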
+        DRval = self.hardware.sramInfo.DR.data;
+        VDD = self.hardware.sramInfo.VDD.data;
+        if(self.EN_QUANT):
+            DO = quant_uni(V_DP,PAmax,DRval,VDD,OAres,0.5*DRval/PAmax,archType);
+            # Return quantized output
+            return DO
+        elif(archType == '8T'):
+            return V_DP
+        else:
+            # Return unquantized differential output
+            return K.concatenate([V_BL[np.newaxis,...],V_BLB[np.newaxis,...]],axis=0)
+        
+    def get_config(self):
+        config = {'H': self.H,
+                  'kernel_lr_multiplier': self.kernel_lr_multiplier,
+                  'bias_lr_multiplier': self.bias_lr_multiplier}
+        base_config = super(BinaryDense, self).get_config()
+        return dict(list(base_config.items()) + list(config.items()))
+
+
+class BinaryConv2D(Conv2D):
+    '''Binarized Convolution2D layer
+    References: 
+    "BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1" [http://arxiv.org/abs/1602.02830]
+    '''
+    def __init__(self, filters, kernel_regularizer=None,activity_regularizer=None, kernel_lr_multiplier='Glorot',
+                 bias_lr_multiplier=None, H=1.,sramInfo=None, EN_NOISE=0, EN_QUANT=1, **kwargs):
+        super(BinaryConv2D, self).__init__(filters, **kwargs)
+        self.H = H
+        self.kernel_lr_multiplier = kernel_lr_multiplier
+        self.bias_lr_multiplier = bias_lr_multiplier
+        self.activity_regularizer = activity_regularizer
+        self.kernel_regularizer = kernel_regularizer
+        
+        self.sramInfo = sramInfo
+        self.hardware = None
+        self.Vt_noise = None
+        
+        self.EN_NOISE = EN_NOISE
+        self.EN_QUANT = EN_QUANT
+
+    def build(self, input_shape):
+        if self.data_format == 'channels_first':
+            channel_axis = 1
+        else:
+            channel_axis = -1 
+        if input_shape[channel_axis] is None:
+            raise ValueError('The channel dimension of the inputs '
+                             'should be defined. Found `None`.')
+
+        input_dim = input_shape[channel_axis]
+        kernel_shape = self.kernel_size + (input_dim, self.filters)
+        #kernel_shape = self.kernel_size + (self.filters,)
+         
+        base = self.kernel_size[0] * self.kernel_size[1]
+        if self.H == 'Glorot':
+            nb_input = int(input_dim * base)
+            nb_output = int(self.filters * base)
+            self.H = np.float32(np.sqrt(1.5 / (nb_input + nb_output)))
+            #print('Glorot H: {}'.format(self.H))
+            
+        if self.kernel_lr_multiplier == 'Glorot':
+            nb_input = int(input_dim * base)
+            nb_output = int(self.filters * base)
+            self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5/ (nb_input + nb_output)))
+            #print('Glorot learning rate multiplier: {}'.format(self.lr_multiplier))
+
+        self.kernel_constraint = Clip(-self.H, self.H)
+        self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
+        #self.bias_initializer = initializers.RandomUniform(-self.H, self.H)
+        self.kernel = self.add_weight(shape=kernel_shape,
+                                 initializer=self.kernel_initializer,
+                                 name='kernel',
+                                 regularizer=self.kernel_regularizer,
+                                 constraint=self.kernel_constraint)
+#        print(K.int_shape(self.kernel))
+
+        if self.use_bias:
+            self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
+            self.bias = self.add_weight((self.filters,),
+                                     initializer=self.bias_initializer,
+                                     name='bias',
+                                     regularizer=self.bias_regularizer,
+                                     constraint=self.bias_constraint)
+
+        else:
+            self.lr_multipliers = [self.kernel_lr_multiplier]
+            self.bias = None
+            
+        # Get DP electrical quantities for this layer
+        self.sramInfo.NB.data = base*input_dim;
+        print(f'######## 2D-CONV layer with {self.sramInfo.NB.data} cells/op supplied at {self.sramInfo.VDD.data:.2f}V ######## ')
+        path_dir = '/export/home/adkneip/Documents/PhD/ELDO/IMC_PYTHON/CURRENT_MAC/'+self.sramInfo.arch.name+'_CELL/'
+        ################################################# USE TEMPORARY SIM DIRECTORY #####################################################
+        with tempfile.TemporaryDirectory(dir=path_dir,prefix='SimFolder_') as path_to_file:
+            #print(path_to_file)
+            # Copy .cir files into the temporary simulation folder -- using the '*' symbol fails for some reason
+            if(self.sramInfo.simulator == "eldo"):
+                file_table = np.array(['MAC_DC.cir','MAC_NL.cir','MAC_satCal.cir','MAC_time.cir','MAC_train_MC.cir']);
+            elif(self.sramInfo.simulator == "spectre"):
+                file_table = np.array(['MAC_DC.scs','MAC_satCal.scs','MAC_time.scs',
+                                        'MAC_DC.mdl','MAC_satCal.mdl','MAC_time.mdl']);
+            else:
+                sys.exit('Error: selected simulator not supported !\n');
+            for file_temp in file_table:
+                commandLine = ['cp',path_dir+'RefFolder/'+file_temp,path_to_file+'/'];
+                proc = subprocess.run(commandLine);
+                if(proc.returncode != 0):
+                    sys.exit('Error: could not copy reference files into temporary sim folder !\n');
+            # Create temporary data folder
+            commandLine = ['mkdir',path_to_file+'/data'];
+            proc = subprocess.run(commandLine);
+            if(proc.returncode != 0):
+                sys.exit('Error: could not create data folder in temporary sim folder !\n');
+            # Perform Spice simulations
+            self.sramInfo = getHardwareData(path_to_file,self.sramInfo)
+        ###################################################################################################################################
+        # Generate hardware parameters
+        hardware = genHardware(self.sramInfo)
+        # Compute the appropriate curve-fitting factors
+        # hardware.a1 = 1; hardware.a2 = 1; hardware.b1 = 1;
+        # self.hardware = hardware
+        print(f'######## Performing three-parametric best curve-fitting ######## ') 
+        self.hardware = DP_fit(path_dir,'early',hardware)        
+        # Create V_th distribution
+        sig_Vth = self.hardware.sig_Vth
+        #self.Vt_noise = K.random_normal(shape=(input_dim,),mean=0,stddev=sig_Vth)
+        self.Vt_noise = K.random_normal(shape=(input_dim,),mean=0,stddev=0)
+        
+
+        # Set input spec.
+        self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
+        self.built = True
+        
+    def call(self, inputs):
+        binary_kernel = binarize(self.kernel, H=self.H)
+        # Retrieve architecture type (diff or se) and derive flag
+        archType = self.hardware.sramInfo.arch.name;
+        IS_SE_OUT = (archType == '8T') or  self.EN_QUANT;
+        # Wrap correct CONV_op function
+        if(archType == '6T'):
+            CONV_op = CONV_op_diff;
+        elif(archType == '8T'):
+            CONV_op = CONV_op_se;
+        else:
+            raise NameError('Error: selected architecture (cell type) not supported during 2DCONV layer compute !\n');
+
+        inverse_kernel_lr_multiplier = 1./self.kernel_lr_multiplier
+        inputs_bnn_gradient = (inputs - (1. - 1./inverse_kernel_lr_multiplier) * K.stop_gradient(inputs))\
+                  * inverse_kernel_lr_multiplier
+
+        outputs_bnn_gradient = CONV_op(
+            self.hardware,
+            inputs_bnn_gradient,
+            binary_kernel,
+            self.Vt_noise,
+            self.data_format,
+            self.EN_NOISE,
+            self.EN_QUANT)
+        
+        if(IS_SE_OUT):
+            V_DP = (outputs_bnn_gradient - (1. - 1./self.kernel_lr_multiplier) * K.stop_gradient(outputs_bnn_gradient))\
+                    * self.kernel_lr_multiplier
+        else:
+            V_BL  = (outputs_bnn_gradient[0] - (1. - 1./self.kernel_lr_multiplier) * K.stop_gradient(outputs_bnn_gradient[0]))\
+                    * self.kernel_lr_multiplier
+            V_BLB = (outputs_bnn_gradient[1] - (1. - 1./self.kernel_lr_multiplier) * K.stop_gradient(outputs_bnn_gradient[1]))\
+                    * self.kernel_lr_multiplier
+        
+        if self.use_bias:
+            if(IS_SE_OUT):
+                V_DP = K.bias_add(V_DP,self.bias,data_format=self.data_format);
+            else:
+                V_BL  = K.bias_add(V_BL,self.bias,data_format=self.data_format);
+                V_BLB = K.bias_add(V_BLB,self.bias,data_format=self.data_format);
+                
+        # Quantify the PA to get the digitized OA
+        IAres = self.hardware.sramInfo.IAres
+        OAres = self.hardware.sramInfo.OAres
+        NB = self.hardware.sramInfo.NB.data
+        PAmax = (2**IAres-1)*NB
+        DRval = self.hardware.sramInfo.DR.data;
+        VDD = self.hardware.sramInfo.VDD.data;
+        if(self.EN_QUANT):
+            DO = quant_uni(V_DP,PAmax,DRval,VDD,OAres,0.5*DRval/PAmax,archType);
+            # Return digitized output
+            return DO
+        elif(archType == '8T'):
+            return V_DP
+        else:
+            # Return unquantized differential output
+            return (V_BL,V_BLB)
+        
+    def get_config(self):
+        config = {'H': self.H,
+                  'kernel_lr_multiplier': self.kernel_lr_multiplier,
+                  'bias_lr_multiplier': self.bias_lr_multiplier}
+        base_config = super(BinaryConv2D, self).get_config()
+        return dict(list(base_config.items()) + list(config.items()))
+
+
+# Aliases
+
+BinaryConvolution2D = BinaryConv2D
diff --git a/logs/.gitkeep b/logs/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/models/.gitkeep b/models/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/models/model_IMC.py b/models/model_IMC.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9026db8d773940591242544e75bfb60c986a049
--- /dev/null
+++ b/models/model_IMC.py
@@ -0,0 +1,214 @@
+from keras.models import Sequential, Model
+from keras import regularizers
+from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, concatenate
+from keras.layers.advanced_activations import LeakyReLU
+from keras.regularizers import l2
+import numpy as np
+
+from layers.custom_regu import Reg_abn_out, Reg_l2_p
+#from layers.analog_BN_current_model import Analog_BN
+from layers.analog_BN_current_interp_PL import Analog_BN
+from layers.binary_layers_IMC import BinaryConv2D,BinaryDense
+from layers.quantized_layers_IMC import QuantizedConv2D,QuantizedDense
+from layers.quantized_layers_IMC_ABN import QuantizedDenseABN
+from layers.quantized_ops import my_quantized_relu as quantize_op
+from layers.binary_ops import binary_tanh as binary_tanh_op
+from layers.binary_ops import binary_sigmoid as binary_sigmoid_op
+from layers.binary_ops import binary_sigmoid_abn, binary_sigmoid_p, binary_tanh, binary_tanh_p
+from models.ADC import quant_uni,Quant_train
+from models.makeModel import make_model
+# Hardware parameters generation
+from utils.config_hardware_model import genHardware
+
+from copy import deepcopy
+
+
+def build_model(cf,model_type,sramInfo,EN_NOISE,EN_QUANT,ABN_INC_ADC):
+    # Useful build variables
+    IAres = sramInfo.IAres;
+    Wres  = sramInfo.Wres;
+    OAres = sramInfo.OAres;
+    dynRange = sramInfo.VDD.data-0.108-0.04; # To be updated --> incorporate quantization directly inside IMC layer, with an EN flag
+    H = 1.
+
+    print('###################################################')
+    print('########### BUILDING CIM-SRAM NETWORK #############')
+    print('###################################################')
+    
+    def binary_sigmoid(x):
+        return binary_sigmoid_op(x)
+    def quant_relu(x,IAres):
+        return quantize_op(x=x,IAres=IAres)
+    
+    if cf.network_type =='float':
+        # Full-precision Keras layers do not accept the CIM hardware kwargs (sramInfo, EN_NOISE, EN_QUANT)
+        Conv_ = lambda s, f, i, c: Conv2D(kernel_size=(s, s), filters=f, strides=(1, 1), padding='same', activation='linear',
+                                   kernel_regularizer=l2(cf.kernel_regularizer),input_shape = (i,i,c),use_bias=False)
+        Conv = lambda s, f: Conv2D(kernel_size=(s, s), filters=f, strides=(1, 1), padding='same', activation='linear',
+                                   kernel_regularizer=l2(cf.kernel_regularizer),use_bias=False)
+        Act = lambda: LeakyReLU()
+        
+        Quant = lambda n: Activation(lambda x: quant_uni(x,maxVal=n,dynRange=dynRange,OAres=OAres,offset=0.5*dynRange/n))
+        
+        Dens_FP = lambda n: Dense(n,use_bias=False)
+        
+        Dens = lambda n: Dense(n,use_bias=False)
+        
+        Dens_ = lambda n,i,c:  Dense(n,use_bias=False,activation='linear',input_shape=(i*i*c,))
+    elif cf.network_type=='qnn':
+        Conv_ = lambda s,f,i,c,m,k: QuantizedConv2D(kernel_size=(s, s), H=1, m_T_DP=m, nRep=k, nb=Wres, filters=f, strides=(1, 1), padding='same',
+                                         activation='linear', kernel_regularizer=l2(cf.kernel_regularizer),
+                                         kernel_lr_multiplier=cf.kernel_lr_multiplier,input_shape = (i,i,c),use_bias=False,sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+        Conv = lambda s,f,m,k: QuantizedConv2D(kernel_size=(s, s), H=1, m_T_DP=m, nRep=k, nb=Wres, filters=f, strides=(1, 1), padding='same',
+                                         activation='linear', kernel_regularizer=l2(cf.kernel_regularizer),
+                                         kernel_lr_multiplier=cf.kernel_lr_multiplier,use_bias=False,sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+        if(EN_QUANT):
+            # Act = lambda: LeakyReLU()
+            Act = lambda: Activation(lambda x: quant_relu(x,IAres=IAres))
+        else:
+            # Act = lambda: Activation(lambda x: binary_sigmoid_abn(x,sramInfo.VDD.data))
+            Act = lambda: Quant_train(sramInfo)
+            # Act = lambda: Activation(lambda x: quant_uni(x,maxVal=0,dynRange=dynRange,VDD=sramInfo.VDD.data,OAres=OAres,offset=0,archType=sramInfo.arch.name));
+      
+        Quant = lambda p: Activation('linear');
+        
+        Dens_FP = lambda n: Dense(n,use_bias=False)
+        
+        Dens = lambda n,m: QuantizedDense(n,nb=Wres,m_T_DP=m,use_bias=False,activation='linear',sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+        
+        Dens_ = lambda n,i,c,m:  QuantizedDense(n,nb=Wres,m_T_DP=m,use_bias=False,activation='linear',input_shape=(i*i*c,),sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+    elif cf.network_type=='full-qnn':
+        Conv_ = lambda s,f,i,c,m,k: QuantizedConv2D(kernel_size=(s, s), H=1, m_T_DP=m, nRep=k, nb=Wres, filters=f, strides=(1, 1), padding='same',
+                                         activation='linear', kernel_regularizer=l2(cf.kernel_regularizer),
+                                         kernel_lr_multiplier=cf.kernel_lr_multiplier,input_shape = (i,i,c),use_bias=False,sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+        Conv = lambda s,f,m,k: QuantizedConv2D(kernel_size=(s, s), H=1, m_T_DP=m, nRep=k, nb=Wres, filters=f, strides=(1, 1), padding='same',
+                                         activation='linear', kernel_regularizer=l2(cf.kernel_regularizer),
+                                         kernel_lr_multiplier=cf.kernel_lr_multiplier,use_bias=False,sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+        Conv_FP_ = lambda s, f, i, c: Conv2D(kernel_size=(s, s), filters=f, strides=(1, 1), padding='same', activation='linear',
+                                   kernel_regularizer=l2(cf.kernel_regularizer),input_shape = (i,i,c),use_bias=False)
+        
+        if(EN_QUANT):
+            Act = lambda: Activation(lambda x: quant_relu(x,IAres=IAres))
+        else:
+            # Act = lambda: Activation(lambda x: binary_sigmoid_abn(x,sramInfo.VDD.data))
+            Act = lambda: Quant_train(sramInfo,ABN_INC_ADC)
+            # Act = lambda: Activation(lambda x: quant_uni(x,maxVal=0,dynRange=dynRange,VDD=sramInfo.VDD.data,OAres=OAres,offset=0,archType=sramInfo.arch.name));
+       
+        # Quant = lambda n: Activation(lambda x: quant_uni(x,maxVal=n,dynRange=dynRange,VDD=sramInfo.VDD.data,OAres=OAres,offset=0.5*dynRange/n,archType=sramInfo.arch.name))
+        Quant = lambda n: Activation(lambda x: quant_uni(x,maxVal=n,dynRange=dynRange,VDD=sramInfo.VDD.data,OAres=OAres,offset=0.,archType=sramInfo.arch.name))
+    
+        Dens_FP = lambda n: Dense(n,use_bias=False)
+        
+        Dens = lambda n,m: QuantizedDense(n,nb=Wres,m_T_DP=m,use_bias=False,activation='linear',sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+        
+        Dens_ = lambda n,i,c,m:  QuantizedDense(n,nb=Wres,m_T_DP=m,use_bias=False,activation='linear',input_shape=(i*i*c,),sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+    elif cf.network_type=='full-qnn-embedded':
+        Conv_ = lambda s,f,i,c,m,k: QuantizedConv2D(kernel_size=(s, s), H=1, m_T_DP=m, nRep=k, nb=Wres, filters=f, strides=(1, 1), padding='same',
+                                         activation='linear', kernel_regularizer=l2(cf.kernel_regularizer),
+                                         kernel_lr_multiplier=cf.kernel_lr_multiplier,input_shape = (i,i,c),use_bias=False,sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+        Conv = lambda s,f,m,k: QuantizedConv2D(kernel_size=(s, s), H=1, m_T_DP=m, nRep=k, nb=Wres, filters=f, strides=(1, 1), padding='same',
+                                         activation='linear', kernel_regularizer=l2(cf.kernel_regularizer),
+                                         kernel_lr_multiplier=cf.kernel_lr_multiplier,use_bias=False,sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+        if(EN_QUANT):
+            Act = lambda: Activation(lambda x: quant_relu(x,IAres=IAres))
+        else:
+            # Act = lambda: Activation(lambda x: binary_sigmoid_abn(x,sramInfo.VDD.data))
+            Act = lambda: Quant_train(sramInfo)
+            # Act = lambda: Activation(lambda x: quant_uni(x,maxVal=0,dynRange=dynRange,VDD=sramInfo.VDD.data,OAres=OAres,offset=0,archType=sramInfo.arch.name));
+       
+        # Quant = lambda n: Activation(lambda x: quant_uni(x,maxVal=n,dynRange=dynRange,VDD=sramInfo.VDD.data,OAres=OAres,offset=0.5*dynRange/n,archType=sramInfo.arch.name))
+        Quant = lambda n: Activation(lambda x: quant_uni(x,maxVal=n,dynRange=dynRange,VDD=sramInfo.VDD.data,OAres=OAres,offset=0.,archType=sramInfo.arch.name))
+    
+        Dens_FP = lambda n: Dense(n,use_bias=False)
+        
+        Dens = lambda n,m: QuantizedDenseABN(n,nb=Wres,m_T_DP=m,use_bias=False,activation='linear',sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT,m_sigma=4)
+        
+        Dens_ = lambda n,i,c,m:  QuantizedDenseABN(n,nb=Wres,m_T_DP=m,use_bias=False,activation='linear',input_shape=(i*i*c,),sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT,m_sigma=4)
+    elif cf.network_type=='bnn':
+        Conv_ = lambda s, f,i,c: BinaryConv2D(kernel_size=(s, s), H=1, filters=f, strides=(1, 1), padding='same',
+                                         activation='linear', kernel_regularizer=l2(cf.kernel_regularizer),
+                                         kernel_lr_multiplier=cf.kernel_lr_multiplier,input_shape = (i,i,c),use_bias=False,sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+        Conv = lambda s, f: BinaryConv2D(kernel_size=(s, s), H=1, filters=f, strides=(1, 1), padding='same',
+                                         activation='linear', kernel_regularizer=l2(cf.kernel_regularizer),
+                                         kernel_lr_multiplier=cf.kernel_lr_multiplier,use_bias=False,sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+        Act = lambda: LeakyReLU()
+        
+        # Quant = lambda n: Activation(lambda x: quant_uni(x,maxVal=n,dynRange=dynRange,OAres=OAres,offset=0.5*dynRange/n))
+        Quant = lambda p: Activation('linear');
+        
+        Dens_FP = lambda n: Dense(n,use_bias=False)
+       
+        Dens = lambda n: BinaryDense(n,use_bias=False,activation='linear',sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+        
+        Dens_ = lambda n,i,c:  BinaryDense(n,use_bias=False,activation='linear',input_shape=(i*i*c,),sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+    elif cf.network_type=='full-bnn':
+        Conv_ = lambda s, f,i,c: BinaryConv2D(kernel_size=(s, s), H=1, filters=f, strides=(1, 1), padding='same',
+                                         activation='linear', kernel_regularizer=l2(cf.kernel_regularizer),
+                                         kernel_lr_multiplier=cf.kernel_lr_multiplier,input_shape = (i,i,c),use_bias=False,sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+        Conv = lambda s, f: BinaryConv2D(kernel_size=(s, s), H=1, filters=f, strides=(1, 1), padding='same',
+                                         activation='linear', kernel_regularizer=l2(cf.kernel_regularizer),
+                                         kernel_lr_multiplier=cf.kernel_lr_multiplier,use_bias=False,sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+        if(EN_QUANT):
+            Act = lambda: Activation(lambda x: binary_sigmoid(x))
+        else:
+            # Act = lambda: Activation(lambda x: binary_sigmoid_abn(x,sramInfo.VDD.data))
+            Act = lambda: Quant_train(sramInfo)
+            # Act = lambda: Activation(lambda x: quant_uni(x,maxVal=0,dynRange=dynRange,VDD=sramInfo.VDD.data,OAres=OAres,offset=0,archType=sramInfo.arch.name));
+        # Quant = lambda n: Activation(lambda x: quant_uni(x,maxVal=n,dynRange=dynRange,OAres=OAres,offset=0.5*dynRange/n));
+        Quant = lambda n: Activation(lambda x: quant_uni(x,maxVal=n,dynRange=dynRange,VDD=sramInfo.VDD.data,OAres=OAres,offset=0.,archType=sramInfo.arch.name))
+           
+        Dens_FP = lambda n: Dense(n,use_bias=False)
+        
+        Dens = lambda n: BinaryDense(n,use_bias=False,activation='linear',sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+        
+        Dens_ = lambda n,i,c:  BinaryDense(n,use_bias=False,activation='linear',input_shape=(i*i*c,),sramInfo=deepcopy(sramInfo),EN_NOISE=EN_NOISE,EN_QUANT=EN_QUANT)
+    else:
+        raise ValueError('Wrong network type; the supported network types in this repo are float, qnn, full-qnn, full-qnn-embedded, bnn and full-bnn')
+
+    if(EN_QUANT):
+        BatchNorm = lambda: BatchNormalization(momentum=0.1,epsilon=1e-5)
+    else:
+        if(cf.network_type == 'full-qnn-embedded'):
+            BatchNorm = lambda n,m: Activation('linear');
+        else:
+            BatchNorm = lambda n,m: Analog_BN(momentum=0.1,epsilon=1e-5,renorm=True,hardware=genHardware(sramInfo),NB=n,m_sigma=m,EN_NOISE=EN_NOISE
+                                            # center=False,scale=False,
+                                            # gamma_regularizer=l2(0.001),beta_regularizer=l2(0.001))
+                                            # activity_regularizer=Reg_abn_out(1e-5,sramInfo.VDD.data))
+                                            # activity_regularizer=Reg_l2_p(0.,0.5)
+                                            );
+    
+    BatchNorm_FP = lambda: BatchNormalization(momentum=0.1,epsilon=1e-5)
+    
+    model = make_model(model_type,cf,Conv_,Conv,Dens_,Dens,Act,Quant,BatchNorm,Dens_FP,BatchNorm_FP,Conv_FP_);
+    return model
+
+
+def load_weights(model, weight_reader):
+    weight_reader.reset()
+
+    for i in range(len(model.layers)):
+        if 'conv' in model.layers[i].name:
+            if 'batch' in model.layers[i + 1].name:
+                norm_layer = model.layers[i + 1]
+                size = np.prod(norm_layer.get_weights()[0].shape)
+
+                beta = weight_reader.read_bytes(size)
+                gamma = weight_reader.read_bytes(size)
+                mean = weight_reader.read_bytes(size)
+                var = weight_reader.read_bytes(size)
+
+                norm_layer.set_weights([gamma, beta, mean, var])
+
+            conv_layer = model.layers[i]
+            if len(conv_layer.get_weights()) > 1:
+                bias = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[1].shape))
+                kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
+                kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
+                kernel = kernel.transpose([2, 3, 1, 0])
+                conv_layer.set_weights([kernel, bias])
+            else:
+                kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
+                kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
+                kernel = kernel.transpose([2, 3, 1, 0])
+                conv_layer.set_weights([kernel])
+    return model
diff --git a/my_datasets/.gitkeep b/my_datasets/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/my_datasets/my_cifar10.py b/my_datasets/my_cifar10.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ecd4c31b5e88276dbe6217e23d1023ecabf474b
--- /dev/null
+++ b/my_datasets/my_cifar10.py
@@ -0,0 +1,63 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""CIFAR10 small images classification dataset.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+
+import numpy as np
+
+from keras import backend as K
+from keras.datasets.cifar import load_batch
+from keras.utils.data_utils import get_file
+from tensorflow.python.util.tf_export import tf_export
+
+
+#@tf_export('keras.datasets.cifar10.load_data')
+def load_data():
+  """Loads CIFAR10 dataset.
+
+  Returns:
+      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
+  """
+  dirname = 'cifar-10-batches-py'
+  origin = './'
+  #path = get_file(dirname, origin=origin, untar=True)
+  path = '/export/home/adkneip/Documents/PhD/Python3/IMC_Modeling/qnn/my_datasets/cifar-10-batches-py';
+
+  num_train_samples = 50000
+
+  x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
+  y_train = np.empty((num_train_samples,), dtype='uint8')
+
+  for i in range(1, 6):
+    fpath = os.path.join(path, 'data_batch_' + str(i))
+    (x_train[(i - 1) * 10000:i * 10000, :, :, :],
+     y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)
+
+  fpath = os.path.join(path, 'test_batch')
+  x_test, y_test = load_batch(fpath)
+
+  y_train = np.reshape(y_train, (len(y_train), 1))
+  y_test = np.reshape(y_test, (len(y_test), 1))
+
+  if K.image_data_format() == 'channels_last':
+    x_train = x_train.transpose(0, 2, 3, 1)
+    x_test = x_test.transpose(0, 2, 3, 1)
+
+  return (x_train, y_train), (x_test, y_test)
diff --git a/saved_models/.gitkeep b/saved_models/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/utils/.gitkeep b/utils/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/utils/load_data.py b/utils/load_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca76fafe5a493e2a5ce716e2ed1389542faa3624
--- /dev/null
+++ b/utils/load_data.py
@@ -0,0 +1,105 @@
+# Copyright 2017 Bert Moons
+
+# This file is part of QNN.
+
+# QNN is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# QNN is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# The code for QNN is based on BinaryNet: https://github.com/MatthieuCourbariaux/BinaryNet
+
+# You should have received a copy of the GNU General Public License
+# along with QNN.  If not, see <http://www.gnu.org/licenses/>.
+
+import numpy as np
+from my_datasets import my_cifar10
+from my_datasets import my_mnist
+
+
+def load_dataset(dataset):
+    if (dataset == "CIFAR-10"):
+
+        print('Loading CIFAR-10 dataset...')
+
+        #train_set_size = 45000
+        #train_set = cifar10(which_set="train", start=0, stop=train_set_size)
+        #valid_set = cifar10(which_set="train", start=train_set_size, stop=50000)
+        #test_set = cifar10(which_set="test")
+        (train_set_X,train_set_Y),(valid_set_X,valid_set_Y) = my_cifar10.load_data()
+
+        train_set_X = np.transpose(np.reshape(np.subtract(np.multiply(2. / 255., train_set_X), 1.), (-1, 3, 32, 32)),(0,2,3,1))
+        valid_set_X = np.transpose(np.reshape(np.subtract(np.multiply(2. / 255., valid_set_X), 1.), (-1, 3, 32, 32)),(0,2,3,1))
+        #test_set.X = np.transpose(np.reshape(np.subtract(np.multiply(2. / 255., test_set.X), 1.), (-1, 3, 32, 32)),(0,2,3,1))
+   
+        # flatten targets
+        train_set_Y = np.hstack(train_set_Y)
+        valid_set_Y = np.hstack(valid_set_Y)
+        #test_set.y = np.hstack(test_set.y)
+
+        # Onehot the targets
+        train_set_Y = np.float32(np.eye(10)[train_set_Y])
+        valid_set_Y = np.float32(np.eye(10)[valid_set_Y])
+        #test_set.y = np.float32(np.eye(10)[test_set.y])
+
+        # for hinge loss
+        train_set_Y = 2 * train_set_Y - 1.
+        valid_set_Y = 2 * valid_set_Y - 1.
+        #test_set.y = 2 * test_set.y - 1.
+        
+        # enlarge the training set by mirroring
+        x_train_flip = train_set_X[:, :, ::-1, :]
+        y_train_flip = train_set_Y
+        train_set_X = np.concatenate((train_set_X, x_train_flip), axis=0)
+        train_set_Y = np.concatenate((train_set_Y, y_train_flip), axis=0)
+
+    elif (dataset == "MNIST"):
+
+        print('Loading MNIST dataset...')
+
+        #train_set_size = 50000
+        #train_set = mnist(which_set="train", start=0, stop=train_set_size)
+        #valid_set = mnist(which_set="train", start=train_set_size, stop=60000)
+        #test_set = mnist(which_set="test")
+        path_to_file = '../my_datasets/mnist.npz'
+        (train_set_X,train_set_Y),(valid_set_X,valid_set_Y) = my_mnist.load_data(path_to_file)
+
+        train_set_X = np.transpose(np.reshape(np.subtract(np.multiply(2. / 255., train_set_X), 1.), (-1, 1, 28, 28)),(0,2,3,1))
+        valid_set_X = np.transpose(np.reshape(np.subtract(np.multiply(2. / 255., valid_set_X), 1.), (-1, 1,  28, 28)),(0,2,3,1))
+        #test_set.X = np.transpose(np.reshape(np.subtract(np.multiply(2. / 255., test_set.X), 1.), (-1, 1,  28, 28)),(0,2,3,1))
+        
+        # flatten targets
+        train_set_Y = np.hstack(train_set_Y)
+        valid_set_Y = np.hstack(valid_set_Y)
+        #test_set.y = np.hstack(test_set.y)
+
+        # Onehot the targets
+        train_set_Y = np.float32(np.eye(10)[train_set_Y])
+        valid_set_Y = np.float32(np.eye(10)[valid_set_Y])
+        #test_set.y = np.float32(np.eye(10)[test_set.y])
+
+        # for hinge loss
+        train_set_Y = 2 * train_set_Y - 1.
+        valid_set_Y = 2 * valid_set_Y - 1.
+        #test_set.y = 2 * test_set.y - 1.
+        
+        # enlarge the training set by mirroring
+        x_train_flip = train_set_X[:, :, ::-1, :]
+        y_train_flip = train_set_Y
+        train_set_X = np.concatenate((train_set_X, x_train_flip), axis=0)
+        train_set_Y = np.concatenate((train_set_Y, y_train_flip), axis=0)
+
+    else:
+        raise ValueError("Wrong dataset given; supported datasets are CIFAR-10 and MNIST")
+
+    train_set = (train_set_X,train_set_Y)
+    valid_set = (valid_set_X,valid_set_Y)
+    return train_set, valid_set
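+
+# Example usage (sketch):
+#   train_set, valid_set = load_dataset("MNIST")
+#   (x_train, y_train), (x_val, y_val) = train_set, valid_set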