diff --git a/data_analysis/Code/Dektak.py b/data_analysis/archive/Code/Dektak.py
similarity index 100%
rename from data_analysis/Code/Dektak.py
rename to data_analysis/archive/Code/Dektak.py
diff --git a/data_analysis/Code/DesCar.py b/data_analysis/archive/Code/DesCar.py
similarity index 100%
rename from data_analysis/Code/DesCar.py
rename to data_analysis/archive/Code/DesCar.py
diff --git a/data_analysis/Code/Intro.py b/data_analysis/archive/Code/Intro.py
similarity index 100%
rename from data_analysis/Code/Intro.py
rename to data_analysis/archive/Code/Intro.py
diff --git a/data_analysis/Code/StressEstimation.py b/data_analysis/archive/Code/StressEstimation.py
similarity index 100%
rename from data_analysis/Code/StressEstimation.py
rename to data_analysis/archive/Code/StressEstimation.py
diff --git a/data_analysis/Code/basicDataModifier.py b/data_analysis/archive/Code/basicDataModifier.py
similarity index 100%
rename from data_analysis/Code/basicDataModifier.py
rename to data_analysis/archive/Code/basicDataModifier.py
diff --git a/data_analysis/Code/boot.txt b/data_analysis/archive/Code/boot.txt
similarity index 100%
rename from data_analysis/Code/boot.txt
rename to data_analysis/archive/Code/boot.txt
diff --git a/data_analysis/Code/comsol.py b/data_analysis/archive/Code/comsol.py
similarity index 100%
rename from data_analysis/Code/comsol.py
rename to data_analysis/archive/Code/comsol.py
diff --git a/data_analysis/Code/extractionIV.py b/data_analysis/archive/Code/extractionIV.py
similarity index 100%
rename from data_analysis/Code/extractionIV.py
rename to data_analysis/archive/Code/extractionIV.py
diff --git a/data_analysis/Code/filePath.py b/data_analysis/archive/Code/filePath.py
similarity index 100%
rename from data_analysis/Code/filePath.py
rename to data_analysis/archive/Code/filePath.py
diff --git a/data_analysis/Code/interpolate.py b/data_analysis/archive/Code/interpolate.py
similarity index 100%
rename from data_analysis/Code/interpolate.py
rename to data_analysis/archive/Code/interpolate.py
diff --git a/data_analysis/Code/machines.py b/data_analysis/archive/Code/machines.py
similarity index 100%
rename from data_analysis/Code/machines.py
rename to data_analysis/archive/Code/machines.py
diff --git a/data_analysis/Code/material.py b/data_analysis/archive/Code/material.py
similarity index 100%
rename from data_analysis/Code/material.py
rename to data_analysis/archive/Code/material.py
diff --git a/data_analysis/Code/material_data.txt b/data_analysis/archive/Code/material_data.txt
similarity index 100%
rename from data_analysis/Code/material_data.txt
rename to data_analysis/archive/Code/material_data.txt
diff --git a/data_analysis/Code/plotGeneral.py b/data_analysis/archive/Code/plotGeneral.py
similarity index 100%
rename from data_analysis/Code/plotGeneral.py
rename to data_analysis/archive/Code/plotGeneral.py
diff --git a/data_analysis/Code/plotPolytek.py b/data_analysis/archive/Code/plotPolytek.py
similarity index 100%
rename from data_analysis/Code/plotPolytek.py
rename to data_analysis/archive/Code/plotPolytek.py
diff --git a/data_analysis/Code/plotRaman.py b/data_analysis/archive/Code/plotRaman.py
similarity index 100%
rename from data_analysis/Code/plotRaman.py
rename to data_analysis/archive/Code/plotRaman.py
diff --git a/data_analysis/Code/plot_config.txt b/data_analysis/archive/Code/plot_config.txt
similarity index 100%
rename from data_analysis/Code/plot_config.txt
rename to data_analysis/archive/Code/plot_config.txt
diff --git a/data_analysis/Code/siliconPeakRemover.py b/data_analysis/archive/Code/siliconPeakRemover.py
similarity index 100%
rename from data_analysis/Code/siliconPeakRemover.py
rename to data_analysis/archive/Code/siliconPeakRemover.py
diff --git a/data_analysis/Documentation/Manual.pptx b/data_analysis/archive/Documentation/Manual.pptx
similarity index 100%
rename from data_analysis/Documentation/Manual.pptx
rename to data_analysis/archive/Documentation/Manual.pptx
diff --git a/data_analysis/ideas/background.png b/data_analysis/archive/ideas/background.png
similarity index 100%
rename from data_analysis/ideas/background.png
rename to data_analysis/archive/ideas/background.png
diff --git a/data_analysis/data_processing.py b/data_analysis/data_processing.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c454dc759e9da2f9d964fb749b0b5b35a7e7d53
--- /dev/null
+++ b/data_analysis/data_processing.py
@@ -0,0 +1,65 @@
+import numpy as np
+from scipy.interpolate import interp1d
+import scipy.signal as signal
+from scipy.optimize import curve_fit
+
def moving_median(x, window_length=3):
    """Apply a sliding median filter to *x*.

    args:
        - x : 1-D signal to filter
        - window_length : odd size of the median window (scipy requirement)

    returns: the median-filtered signal (edges are zero-padded by medfilt).
    """
    filtered = signal.medfilt(x, kernel_size=window_length)
    return filtered
+
def smooth(x, window_length=10, polyorder=2):
    """Smooth a signal with a Savitzky-Golay filter.

    args:
        - x : 1-D signal to smooth
        - window_length : length of the filter window
        - polyorder : order of the local polynomial fit

    NOTE(review): the default window_length of 10 is even; several scipy
    versions require an odd window for savgol_filter — confirm the default
    is ever used as-is.
    """
    smoothed = signal.savgol_filter(x, window_length, polyorder)
    return smoothed
+
def interpolate(x, y, x_interp, kind="cubic"):
    """Interpolate the sampled curve (x, y) at the points *x_interp*.

    args:
        - x, y : sample coordinates of the curve
        - x_interp : abscissae where the curve is evaluated
        - kind : interpolation scheme passed to scipy.interpolate.interp1d

    returns: interpolated values at x_interp.
    """
    interpolator = interp1d(x, y, kind=kind)
    return interpolator(x_interp)
+
def remove_baseline(x, y, xmin_baseline, xmax_baseline, polyorder=2):
    """Fit a polynomial baseline on selected x-ranges and subtract it from y.

    args:
        - x, y : spectrum coordinates (array-like)
        - xmin_baseline, xmax_baseline : same-length sequences; the i-th pair
          [xmin_baseline[i], xmax_baseline[i]] delimits a region used for the fit
        - polyorder : degree of the baseline polynomial

    returns: (y - baseline, baseline)

    BUG FIX: the original accumulated the mask with `index = index or (...)`
    on a non-empty Python list, which is always truthy, so `or` returned the
    initial all-False list and the polynomial was fit on an empty selection.
    A numpy boolean mask combined with |= selects the intended regions.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    index = np.zeros(len(x), dtype=bool)
    for lo, hi in zip(xmin_baseline, xmax_baseline):
        index |= (x >= lo) & (x <= hi)
    p = np.polyfit(x[index], y[index], deg=polyorder)
    baseline = np.polyval(p, x)
    return y - baseline, baseline
+
def fit_lorentzian(x, y, xmin=None, xmax=None, x0=520.7, A0=1, W0=3):
    """Fit a single Lorentzian peak to the spectrum (x, y).

    args:
        - x, y : spectrum coordinates (numpy arrays)
        - xmin, xmax : optional window restricting the points used for the fit
          (both None means fit on the whole spectrum)
        - x0, A0, W0 : initial guesses for position, amplitude and width

    returns: ({"position", "amplitude", "width"}, fitted curve evaluated on x)
    """
    if xmin is None and xmax is None:
        x_fit, y_fit = x, y
    else:
        mask = (x > xmin) & (x < xmax)
        x_fit, y_fit = x[mask], y[mask]
    popt, _ = curve_fit(lorentzian, x_fit, y_fit, p0=[x0, A0, W0])
    params = {"position": popt[0], "amplitude": popt[1], "width": popt[2]}
    return params, lorentzian(x, *popt)
+
def fit_lorentzian_2peaks(x, y, xmin=None, xmax=None, x0=(520, 520.7), A0=(1, 1), W0=(3, 3)):
    """Fit two overlapping Lorentzian peaks to the spectrum (x, y).

    args:
        - x, y : spectrum coordinates (numpy arrays)
        - xmin, xmax : optional window restricting the points used for the fit
          (both None means fit on the whole spectrum)
        - x0, A0, W0 : pairs of initial guesses (positions, amplitudes, widths)

    returns: ({"position", "amplitude", "width"} with 2-element entries,
              fitted two-peak curve evaluated on x)

    BUG FIX: the original evaluated the result with the 3-parameter
    `lorentzian` while unpacking the 6 fitted parameters -> TypeError;
    the two-peak model `lorentzian2` is the correct one. The p0 flattening
    also no longer relies on the deprecated np.reshape `newshape` keyword.
    """
    if xmin is None and xmax is None:
        x_fit = x
        y_fit = y
    else:
        index = (x > xmin) & (x < xmax)
        x_fit = x[index]
        y_fit = y[index]
    # Order matches lorentzian2(x, x0, x01, A, A1, W, W1).
    p0 = [x0[0], x0[1], A0[0], A0[1], W0[0], W0[1]]
    p_lorentzian = curve_fit(lorentzian2, x_fit, y_fit, p0=p0)[0]
    return ({"position": p_lorentzian[:2],
             "amplitude": p_lorentzian[2:4],
             "width": p_lorentzian[4:]},
            lorentzian2(x, *p_lorentzian))
+
def lorentzian(x, x0, A, W):
    """Lorentzian line shape: peak amplitude A, center x0, full width W at half maximum."""
    half_width = W / 2
    return A / (1 + ((x - x0) / half_width) ** 2)
+
def lorentzian2(x, x0, x01, A, A1, W, W1):
    """Sum of two Lorentzians: (x0, A, W) for the first peak, (x01, A1, W1) for the second."""
    first = A / (1 + ((x - x0) / (W / 2)) ** 2)
    second = A1 / (1 + ((x - x01) / (W1 / 2)) ** 2)
    return first + second
+
def gaussian(x, x0, w, a):
    """Area-normalized Gaussian scaled by a: center x0, standard deviation w."""
    norm = w * np.sqrt(2 * np.pi)
    return a * np.exp(-((x - x0) ** 2) / (2 * w ** 2)) / norm
+
def fit_gaussian(x, y, xmin=None, xmax=None, x0=0, w=1, a=1):
    """Fit a single Gaussian to the spectrum (x, y).

    args:
        - x, y : spectrum coordinates (numpy arrays)
        - xmin, xmax : optional window restricting the points used for the fit
          (both None means fit on the whole spectrum)
        - x0, w, a : initial guesses for position, width and amplitude

    returns: ({"position", "width", "amplitude"}, fitted curve evaluated on x)
    """
    if xmin is None and xmax is None:
        x_fit, y_fit = x, y
    else:
        mask = (x > xmin) & (x < xmax)
        x_fit, y_fit = x[mask], y[mask]
    popt, _ = curve_fit(gaussian, x_fit, y_fit, p0=[x0, w, a])
    params = {"position": popt[0], "width": popt[1], "amplitude": popt[2]}
    return params, gaussian(x, *popt)
diff --git a/data_analysis/file_handling.py b/data_analysis/file_handling.py
new file mode 100644
index 0000000000000000000000000000000000000000..a41aaf0663f71ec0b22113f26b0389f876a696ae
--- /dev/null
+++ b/data_analysis/file_handling.py
@@ -0,0 +1,69 @@
+
+import sys, os
+import numpy as np
+import datetime   
+
def __init__(self, workdir=None):
    """Set the working directory, defaulting to the running script's folder.

    NOTE(review): this is a module-level __init__ taking self — it looks like
    it belongs inside a class; confirm the intended structure.

    BUG FIX: the original only assigned self.workdir when workdir was None,
    so an explicitly supplied workdir was silently dropped. Also uses
    `is None` instead of `== None`.
    """
    if workdir is None:
        # Default to the directory containing the script being executed.
        self.workdir = os.path.dirname(sys.argv[0])
    else:
        self.workdir = workdir
+
def read_file(self, file_path, comments="#", delimiter=None, skip_header=0,
              skip_footer=0, max_rows=None, usecols=None, deletechars=None):
    """Read a numeric text file with numpy.genfromtxt.

    args:
        - file_path (string) : path of the data file
        - comments, delimiter, skip_header, skip_footer, max_rows, usecols,
          deletechars : forwarded to numpy.genfromtxt (deletechars=None lets
          genfromtxt use its own default character set)

    returns: the parsed data as a numpy array.

    BUG FIX: the original default values (comments=comments, ...) referenced
    undefined names, raising NameError as soon as the module was imported;
    they are replaced by genfromtxt-compatible defaults.
    """
    data = np.genfromtxt(file_path, comments=comments, delimiter=delimiter,
                         skip_header=skip_header, skip_footer=skip_footer,
                         max_rows=max_rows, usecols=usecols, deletechars=deletechars)
    return data
+    
def write_in_file(self, file_path, data, delimiter=",", overwrite=False, header=None, date=True, comment="#"):
    """ Function to write data in a file

        args:
            - file_path (string) : path for the data file, including the filename with its extension
            - data (scalar, list or array) : the data to be written in the file
            - delimiter (char) : the delimiter to separate the column of the data
            - overwrite (boolean) : if True overwrite the existing file if any, if False, append the data to the existing file if any
            - header (string) : header to be written before the data
            - date (boolean) : date to be written at the beginning of the file
            - comment (char) : char to be written before the header and date to indicate non-data lines

    """
    # .csv files are always comma-separated, whatever delimiter was requested.
    if file_path.split(".")[-1] == "csv":
        delimiter = ","

    mode = "w" if overwrite else "a"
    # Context manager guarantees the handle is closed even if a write fails
    # (the original leaked the handle on exceptions).
    with open(file_path, mode) as f:
        if date:
            f.write("%s %s\n" % (comment, datetime.datetime.now().strftime("%c")))

        if isinstance(header, str):
            for line in header.split("\n"):
                f.write(comment + " " + line + "\n")

        data = np.asarray(data)
        if data.ndim == 0:
            # Scalar: one value per line.
            f.write("%.6E\n" % data)
        elif data.ndim == 1:
            # 1-D: single delimiter-separated row.
            f.write(delimiter.join("%.6E" % value for value in data) + "\n")
        elif data.ndim == 2:
            # 2-D: one delimiter-separated row per line.
            for row in data:
                f.write(delimiter.join("%.6E" % value for value in row) + "\n")
        # NOTE(review): arrays with ndim > 2 are silently ignored, matching
        # the original behavior — confirm this is intended.
diff --git a/data_analysis/raman.py b/data_analysis/raman.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3afb7bb0aa8d98f39bc4eae1b742b01ca2a9726
--- /dev/null
+++ b/data_analysis/raman.py
@@ -0,0 +1,20 @@
+
+
class raman:
    """Helpers for Raman spectroscopy: peak detection and spectrum plotting.

    BUG FIX: every method of the original had an empty body (SyntaxError —
    the module could not even be imported) and plot_with_peaks referenced
    peak_dic without self. Minimal working implementations are provided;
    plotting imports matplotlib locally so the module stays importable
    without it.
    """

    # Known Raman peak positions (cm^-1) for the supported materials.
    peak_dic = {"Si": [520.7],
                "VO2": [140, 192, 223, 260, 308, 338, 387, 395, 440, 482, 497,
                        588, 613, 661, 823]}

    def find_peaks(self, x, y, prominence=None):
        """Detect peaks in the spectrum (x, y).

        args:
            - x, y : spectrum coordinates (wavenumber, intensity)
            - prominence : optional threshold forwarded to scipy.signal.find_peaks

        returns: (peak positions on the x axis, scipy peak properties dict)
        """
        import scipy.signal as _signal  # local import: keeps module import light
        indices, properties = _signal.find_peaks(y, prominence=prominence)
        positions = [x[i] for i in indices]
        return positions, properties

    def find_peaks_from_file(self, file_path, prominence=None):
        """Load a two-column (x, y) comma-separated file and run find_peaks on it."""
        import numpy as _np
        data = _np.genfromtxt(file_path, delimiter=",")
        return self.find_peaks(data[:, 0], data[:, 1], prominence=prominence)

    def plot_with_peaks(self, x, y, ax=None, material=None):
        """Plot a spectrum and mark the known peak positions of *material*."""
        if material is not None and material not in self.peak_dic:
            print("This material is not available in the raman peaks database.\nAvailable material are: %s" % self.peak_dic.keys())
            material = None
        import matplotlib.pyplot as plt  # local import: plotting is optional
        if ax is None:
            _, ax = plt.subplots()
        ax.plot(x, y)
        if material is not None:
            for position in self.peak_dic[material]:
                ax.axvline(position, linestyle="--", alpha=0.5)
        return ax

    def plot_from_file(self, file_path, ax=None, material=None):
        """Load a two-column (x, y) file and plot it with optional peak markers."""
        import numpy as _np
        data = _np.genfromtxt(file_path, delimiter=",")
        return self.plot_with_peaks(data[:, 0], data[:, 1], ax=ax, material=material)

    def plot_from_multiple_files(self, file_paths, ax=None, material=None):
        """Plot several spectra on the same axes; returns the shared axes."""
        for file_path in file_paths:
            ax = self.plot_from_file(file_path, ax=ax, material=material)
        return ax
\ No newline at end of file
diff --git a/equipment_control/script_template.py b/equipment_control/script_template.py
deleted file mode 100644
index 41b9f5646139e587c33c33bc23d56895affc2877..0000000000000000000000000000000000000000
--- a/equipment_control/script_template.py
+++ /dev/null
@@ -1,53 +0,0 @@
-
-
-# =============================================================================
-# 1. Import classes and modules
-# =============================================================================
-
-import sys
-#sys.path.insert(1, '/path/to/application/app/folder')
-sys.path.insert(1, 'D:/Roisin/Documents/chopes')
-
-import equipment_control.equipment as eq
-import equipment_control.EQUIPMENT_TYPE as EQ_TYPE
-
-
-# =============================================================================
-# 2. List  available connections (chopes use pyvisa package for communicate with most equipments)
-# =============================================================================
-rm=eq.resource_manager()
-list_connections= eq.available_connections()
-print("Available connections: %s"%str(list_connections))
-
-# =============================================================================
-# 3. Connection to the equipments
-# =============================================================================
-myEQUIPMENT=EQ_TYPE.EQUIPMENT_TYPE()
-
-# =============================================================================
-# 4. Measurement parameters
-# =============================================================================
-FILE_PATH='/path/to/data/folder'
-PARAM_INIT=???
-PARAM_OPERATION=???
-
-# =============================================================================
-# 5. Initialization of the equipments
-# =============================================================================
-myEQUIPMENT.initialize(PARAM_INIT)
-
-# =============================================================================
-# 6. Measurement script
-# =============================================================================
-myEQUIPMENT.set_PARAM(PARAM_OPERATION)
-DATA=myEQUIPMENT.read_data()
-
-# =============================================================================
-# 7. Close connection
-# =============================================================================
-myEQUIPMENT.close_connection()
-
-# =============================================================================
-# 8. Save data
-# =============================================================================
-myEQUIPMENT.write_in_file(FILE_PATH,DATA)
diff --git a/equipment_control/temp.py b/equipment_control/temp.py
deleted file mode 100644
index aad3c61798b0cbfd0729f8580373f5ce9e08f041..0000000000000000000000000000000000000000
--- a/equipment_control/temp.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Thu Mar 20 11:42:02 2025
-
-@author: nroisin
-"""
-
-import serial
-import pyvisa
-rm = pyvisa.ResourceManager()    
-print(rm.list_resources())
-mykal100=serial.Serial(
-    port="COM7",
-    baudrate=9600,
-    parity=serial.PARITY_NONE,
-    stopbits=serial.STOPBITS_ONE,
-    bytesize=serial.EIGHTBITS,
-    timeout=10
-)
-
-mykal100.write(str.encode('MS\n'))
-print(mykal100.read_until(b'\r'))
-
-mykal100.write(str.encode('>PD0\n'))
-print(mykal100.read_until(b'\r'))
-# mykal100.write(str.encode('>PP100\n'))
-# print(mykal100.read())
-
-# mykal100.write('MZ\n'.encode('utf-8'))
-
-# mykal100.write('?PS\n'.encode('ascii'))
-# print(mykal100.readline())
-
-# mykal100.write('MZ\n'.encode('ascii'))
-mykal100.close()
\ No newline at end of file