# config_cim_cnn_param.py

    ########################################
    ### Dataset & Neural net information ###
    ########################################
    # // Dataset //
    
    config_path = "config_cim_cnn_param"
    
    dataset_name = "CIFAR-10";
    dim=32
    channels=3
    
    classes=10
    # // Network structure //
    
    network_type = "float";
    #network_struct = "MLP_512_256_32_32_10"
    network_struct = "VGG-8"
    
    OP_TYPE = "CONV-2D";
    
    C_IN_VEC = [1024,256,512,32,32];
    C_OUT_VEC = [512,256,32,32,10];
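# Sketch under an assumption: C_IN_VEC / C_OUT_VEC are read here as per-layer input and
# output channel (or neuron) counts of the CIM-mapped layers, one entry per layer; the
# derived value below is illustrative only.
assert len(C_IN_VEC) == len(C_OUT_VEC)
_n_cim_layers = len(C_IN_VEC)   # 5 mapped layers with the vectors above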
    
    Nl_fp = 1;
    
    # // Conv. kernel size //
    kern_size = 3
    # // Regularization //
    
    kernel_regularizer=0.001
    
    activity_regularizer=0.
    # // Training iterations & savings //
    
    Nimg_save = 128;
    
    #####################################
    ########## Hyperparameters ##########
    #####################################
    # Main hyper-params
    epochs = 30
    
    # batch_size = 128
    
    lr = 0.005
    # decay = 0.000025
    decay = 5e-4
    
    # Decay & lr factors
    
    decay_at_epoch = [1, 10, 30 ]
    factor_at_epoch = [.1, .1, .1]
    
    kernel_lr_multiplier = 10
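# Minimal sketch, assuming the training script multiplies the learning rate by
# factor_at_epoch[i] once epoch decay_at_epoch[i] is reached (an assumption about how
# the two lists are consumed, not the project's actual schedule callback).
def _example_lr_at_epoch(epoch, base_lr=lr):
    """Hypothetical LR schedule derived from decay_at_epoch / factor_at_epoch."""
    current = base_lr
    for e, f in zip(decay_at_epoch, factor_at_epoch):
        if epoch >= e:
            current *= f
    return current
# e.g. _example_lr_at_epoch(0) -> 0.005, _example_lr_at_epoch(10) -> 0.005 * 0.1 * 0.1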
    # Debug and logging
    progress_logging = 1 # can be 0 = no std logging, 1 = progress bar logging, 2 = one log line per epoch
# finetune can be False or True
    finetune = False
    
    ########################################
    ######### Hardware information #########
    ########################################
    # ARCHITECTURE-RELATED PARAMETERS
    
    cim_type = 'charge';
    arch = '10TC';
    
    tech = 'GF22nmFDX';
    typeT = 'RVT';
    # SUPPLY and BACK-BIAS
    VDD  = 0.8;
    
    Vmax_beta = 0.02;
    
    BBN  = 0;
    BBP  = 0;
    # CIM-SRAM I/O RESOLUTION
    
    Wres  = 1;
    
    # ABN resolution (if enabled)
    
    r_gamma = 5;
    r_beta  = 5;
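# Illustration only (assumption): r_gamma / r_beta are read as uniform-quantiser
# bit-widths for the ABN scale (gamma) and offset (beta), i.e. 2**5 = 32 levels each.
_abn_gamma_levels = 2 ** r_gamma
_abn_beta_levels  = 2 ** r_beta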
    
    # MAXIMUM INPUT VECTOR SIZE for ALL layers
    Nrows = 1152;
    
    Ncols = 256;
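# Minimal sketch under an assumption: if C_IN_VEC holds the unrolled input-vector length
# each layer presents to the array, a layer longer than Nrows would need several array
# evaluations; the ceil-division below only illustrates that reading, not the actual
# hardware mapping.
import math
_array_passes_per_layer = [math.ceil(c_in / Nrows) for c_in in C_IN_VEC]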
    
# Timing configuration (! these should be updated to match the latest configuration setup)
    T_DP    = 0x3;
    T_PRE   = 0x3;
    T_MBIT  = 0x3;
    T_ADC   = 0x3;  
    
    
    ########################################
    ########## Simulation flags ############
    ########################################
    # Simulator (techno-dependent)
    simulator = "spectre"
    # Enable noisy training
    
    EN_NOISE = 1;
    
# Enable dynamic-range scaling (charge-domain)
    EN_SCALE = 1;
    
    # Enable analog BN
    ANALOG_BN = 1;
    # Embedded ABN
    IS_EMBEDDED = 0;
    
    # Ideal or effective ABN HW model
    IDEAL_ABN = 1;
    
    # ABN model includes ADC behaviour
    
# Use post-layout model instead of pre-layout versions
    
    # Enable saving
    SAVE_EN = 1;
    # Is first layer FC (depends on network_struct)
    IS_FL_MLP = (OP_TYPE == "FC");
    
    #######################################
    ############ Output file ##############
    #######################################
    # Model file
    path_to_model = "./saved_models/";
    model_template = "models/model_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}ABN_{}noise";
    # Training output files
    path_to_out = "./saved_models/";
    
    acc_file_template = "accuracy/acc_IMC_{}_{}_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}SCALE_{}ABN_{}noise.txt";
    w_file_template = "weights/weights_IMC_{}_{}_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}SCALE_{}ABN_{}noise.hdf5";
    in_file_template = "inputs/in_IMC_{}_{}_{}_IA{}b.txt";
    
    out_file_template = "outputs/out_IMC_{}_{}_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}SCALE_{}ABN_{}noise.txt";
    
    inference_file_template = "outputs/inference_IMC_{}_{}_{}_{}_IA{}bW{}bOA{}b_{}b{}bABN_{}iter_{}SCALE_{}ABN_{}noise.txt";
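# Usage sketch: the "{}" fields above are presumably filled via str.format() in the
# training script; the argument order below is purely hypothetical and only demonstrates
# the mechanism, not the real field list.
_example_in_file = path_to_out + in_file_template.format(dataset_name, network_struct, arch, Wres)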
    
    
    # On-chip inference files
    path_to_chip = "./chip_files/";
    
    chip_in_template  = "inputs/in_cimu_{}_{}_{}_{}_IA{}b.txt";
    chip_out_template = "outputs/out_cimu_{}_{}_{}_{}_IA{}bW{}bOA{}b_noise{}";
    chip_inference_template = "outputs/inference_cimu_{}_{}_{}_{}_IA{}bW{}bOA{}b_noise{}.txt";
    chip_w_template     = "weights/weights_cimu_{}_{}_{}_{}_IA{}bW{}bOA{}b_noise{}";
    chip_gamma_template = "abn/gamma_cimu_{}_{}_{}_{}_IA{}bW{}bOA{}b_noise{}";
    chip_beta_template  = "abn/beta_cimu_{}_{}_{}_{}_IA{}bW{}bOA{}b_noise{}";
    chip_w_FP_template      = "fp_weights/weights_fp_{}_{}_{}_{}_IA{}bW{}bOA{}b_noise{}";
    chip_gamma_FP_template  = "fp_bn/gamma_fp_{}_{}_{}_{}_IA{}bW{}bOA{}b_noise{}";
    chip_beta_FP_template   = "fp_bn/beta_fp_{}_{}_{}_{}_IA{}bW{}bOA{}b_noise{}";
    
    # FPGA files
    path_to_fpga = "./chip_files/fpga/"
    
    
    
    fS_beta_fp = 1024;
    
    fS_gamma_fp = 64;
    
    # // CPU-only training //
    cpu = True
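# Sketch (assumption): a common way a cpu-only flag like this is honoured is to hide GPU
# devices from TensorFlow/Keras before the framework is imported by the training script;
# this is illustrative and not necessarily the project's actual mechanism.
import os
if cpu:
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"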
    # // Dummy values required at create time //
    out_wght_path = None;
    tensorboard_name = None;