diff --git a/Dataset_tools/Rename_vgg_face_dataset.py b/Dataset_tools/Rename_vgg_face_dataset.py
index 1f7b1d3d1b2776d1ffd6b1eb2ad8743874ee9c17..0bad9d6ce98e4363e4178594e22df2f3c285cc7b 100644
--- a/Dataset_tools/Rename_vgg_face_dataset.py
+++ b/Dataset_tools/Rename_vgg_face_dataset.py
@@ -1,5 +1,5 @@
 import os
-import shutil
+
 
 
 
diff --git a/Dataset_tools/Split_celeba_in_folders.py b/Dataset_tools/Split_celeba_in_folders.py
index 119378034745fe3e06870c42e611d98bf22ea8e7..275ea779f8cedfc3ff3ee02f3cd4f3cd8645463c 100644
--- a/Dataset_tools/Split_celeba_in_folders.py
+++ b/Dataset_tools/Split_celeba_in_folders.py
@@ -3,8 +3,15 @@ import os
 import shutil
 
 
-def Read_Two_Column_File(file_name):
-    with open(file_name, 'r') as f_input:
+def Read_Two_Column_File(file_path):
+    """
+    Read a space-delimited two-column file of filenames and labels.
+
+    :param file_path: Path to the two-column text file.
+    :return: Two lists: x contains the filenames and y contains the label
+    corresponding to each filename.
+    """
+    with open(file_path, 'r') as f_input:
         csv_input = csv.reader(f_input, delimiter=' ', skipinitialspace=True)
         x = []
         y = []
@@ -16,8 +23,8 @@ def Read_Two_Column_File(file_name):
 
 x, y = Read_Two_Column_File(r'/home/khlifi/Downloads/identity_CelebA.txt')
 
-path = r'/home/khlifi/Documents/newdat'
-src_folder = r"/home/khlifi/Documents/datasets"
+path = 'replace with destination path'
+src_folder = 'replace with source folder path'
 for i in range(len(x)):
     b=0
     for j in range(len(x)):
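The nested loop shown above scans the whole list once per image to collect the files belonging to each identity, which is quadratic in the dataset size. A linear sketch of the same grouping, assuming x holds filenames and y the matching labels as returned by Read_Two_Column_File; the helper names are illustrative, not part of the repo:

import os
import shutil
from collections import defaultdict

def group_files_by_label(x, y):
    # map each label to the list of filenames carrying that label
    groups = defaultdict(list)
    for filename, label in zip(x, y):
        groups[label].append(filename)
    return groups

def split_into_folders(groups, src_folder, dst_root):
    # copy every image into a per-label subfolder of the destination
    for label, filenames in groups.items():
        label_dir = os.path.join(dst_root, str(label))
        os.makedirs(label_dir, exist_ok=True)
        for filename in filenames:
            shutil.copy(os.path.join(src_folder, filename),
                        os.path.join(label_dir, filename))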
diff --git a/Dataset_tools/crop_celeba.py b/Dataset_tools/crop_celeba.py
index 7190e991721d35224967587ba4bbae3c1c736adc..aaf9d0eae2dd68faed86d266b25bc73d2d410ea0 100644
--- a/Dataset_tools/crop_celeba.py
+++ b/Dataset_tools/crop_celeba.py
@@ -6,6 +6,12 @@ from matplotlib.pyplot import imshow
 import os
 import shutil
 def crop_image(image_path):
+    """
+
+    :param image_path: Path to the input image
+    :return: tuple containing a boolean indicating successful detection and the cropped
+    image if successful otherwise (False, None)
+    """
     detector = MTCNN()
     img=cv2.imread(image_path)
     data=detector.detect_faces(img)
@@ -13,23 +19,25 @@ def crop_image(image_path):
     if data !=[]:
         for faces in data:
             box=faces['box']
-            # calculate the area in the image
+            # calculate the area of the bounding box in the image
             area = box[3] * box[2]
+            # check whether the current detected face is the largest so far
             if area>biggest:
                 biggest=area
                 bounding_box=box
+        # Ensure bounding box coordinates are non-negative
         bounding_box[0]= 0 if bounding_box[0]<0 else bounding_box[0]
         bounding_box[1]= 0 if bounding_box[1]<0 else bounding_box[1]
-        img =img[bounding_box[1]: bounding_box[1]+bounding_box[3],bounding_box[0]: bounding_box[0]+ bounding_box[2]]
-        cropped_face = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # convert from bgr to rgb
+        # crop the face region from the image
+        cropped_img =img[bounding_box[1]: bounding_box[1]+bounding_box[3],bounding_box[0]: bounding_box[0]+ bounding_box[2]]
+        cropped_face = cv2.cvtColor(cropped_img, cv2.COLOR_BGR2RGB) # convert from bgr to rgb
         return (True, cropped_face)
     else:
         return (False, None)
 #path = r"G:\datasets\celeb-A\img_align_celeba\000060.jpg"
-Path2 = r'/home/khlifi/Downloads/img_align_celeba'
-dst_path= r"/home/khlifi/Documents/datasets"
+Path2 = 'replace with source path'
+dst_path= 'replace with destination path'
 all_files = os.listdir(Path2)
-print(len(all_files))
 for files in all_files:
     img_path = os.path.join(Path2, files)
     dst_path3 = os.path.join(dst_path, files)
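As a usage note, a minimal sketch of calling crop_image on a single file, assuming crop_image from the script above is in scope; the input path is a placeholder:

import cv2

success, face = crop_image('replace with path to an image')
if success:
    # crop_image returns an RGB array, so convert back to BGR for cv2.imwrite
    cv2.imwrite('face.jpg', cv2.cvtColor(face, cv2.COLOR_RGB2BGR))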
diff --git a/Dataset_tools/crop_vggface.py b/Dataset_tools/crop_vggface.py
index 37386a7526a4c1657eeb54ea60ef0005502ee450..6f22c40cc5df5dd2d7f901da37c770c12e16e7af 100644
--- a/Dataset_tools/crop_vggface.py
+++ b/Dataset_tools/crop_vggface.py
@@ -1,10 +1,13 @@
 from mtcnn import MTCNN
 import cv2
-#import matplotlib.pyplot as plt
-#from matplotlib.pyplot import imshow
 import os
-import shutil
 def crop_image(image_path):
+    """
+
+    :param image_path: Path to the input image
+    :return: tuple containing a boolean indicating successful detection and the cropped
+    image if successful otherwise (False, None)
+    """
     detector = MTCNN()
     img=cv2.imread(image_path)
     data=detector.detect_faces(img)
@@ -12,23 +15,25 @@ def crop_image(image_path):
     if data !=[]:
         for faces in data:
             box=faces['box']
-            # calculate the area in the image
+            # calculate the area of the bounding box in the image
             area = box[3] * box[2]
+            # check whether the current detected face is the largest so far
             if area>biggest:
                 biggest=area
-                bbox=box
-        bbox[0]= 0 if bbox[0]<0 else bbox[0]
-        bbox[1]= 0 if bbox[1]<0 else bbox[1]
-        img =img[bbox[1]: bbox[1]+bbox[3],bbox[0]: bbox[0]+ bbox[2]]
-        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # convert from bgr to rgb
-        return (True, img)
+                bounding_box=box
+        # Ensure bounding box coordinates are non-negative
+        bounding_box[0]= 0 if bounding_box[0]<0 else bounding_box[0]
+        bounding_box[1]= 0 if bounding_box[1]<0 else bounding_box[1]
+        # crop the face region from the image
+        cropped_img =img[bounding_box[1]: bounding_box[1]+bounding_box[3],bounding_box[0]: bounding_box[0]+ bounding_box[2]]
+        cropped_face = cv2.cvtColor(cropped_img, cv2.COLOR_BGR2RGB) # convert from bgr to rgb
+        return (True, cropped_face)
     else:
         return (False, None)
-#path = r"G:\datasets\celeb-A\img_align_celeba\000060.jpg"
-Path2 = r"/home/khlifi/Documents/vgg_face/train"
-dst_path = r"/home/khlifi/Documents/vggface_cropped"
+
+Path2 = 'replace with source directory'
+dst_path = 'replace with destination directory'
 all_folders = os.listdir(Path2)
-print(len(all_folders))
 result = False
 
 for folders in all_folders:
diff --git a/Siamese_model.py b/Siamese_model.py
index fa47c7791d26eaf44200dea54f41dd0937f40e71..75370a2828024ff2c36d02a795c3f2ba08821ecf 100644
--- a/Siamese_model.py
+++ b/Siamese_model.py
@@ -11,9 +11,15 @@ from keras.models import Model, Sequential
 
 
 def image_embedder(input_shape):
+    """
+
+    :param input_shape: Shape of th expected input
+    :return: the encoder
+    """
     "this function creates a CNN that will be used to generate embeddings of the images"
     "the layers until the 27th layer will be frozen"
 
     pretrained_model = Xception(
         input_shape=input_shape,
         weights='imagenet',
@@ -36,6 +42,11 @@ def image_embedder(input_shape):
 
 
 def get_siamese_network(input_shape):
+    """
+
+    :param input_shape: shape of the input expected by the network
+    :return: the encoder and the siamse network.
+    """
     encoder = image_embedder(input_shape)
 
     # Define the input layers of the model for the inputs
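The docstrings here say that layers up to the 27th are frozen. A minimal sketch of that freezing pattern, assuming the Xception backbone shown above; the cutoff of 27 mirrors the docstring and the pooling argument is an assumption:

from keras.applications import Xception

pretrained_model = Xception(input_shape=(224, 224, 3), weights='imagenet',
                            include_top=False, pooling='avg')
for i, layer in enumerate(pretrained_model.layers):
    # freeze the first 27 layers, leave the rest trainable
    layer.trainable = i >= 27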
diff --git a/face_image_quality/face_image_quality.py b/face_image_quality/face_image_quality.py
index a61c566ef77e079aa660c4cf45cca2a4ada349e8..204654822966254b1beea3f8c21f6fbaa021f1ab 100644
--- a/face_image_quality/face_image_quality.py
+++ b/face_image_quality/face_image_quality.py
@@ -6,28 +6,43 @@ from numpy.linalg import norm
 
 
 
+image_path = 'replace with path to test image'
 
-
-image_path = cv2.imread(r'G:\datasets\new_dat\1\006439.jpg')
+image = cv2.imread(image_path)
 #gray = cv2.imread(r'/home/khlifi/Documents/test_bgg/n000017/53.jpg', 0)
 
 
 def brightness(img):
+    """
+
+    :param img: Image path
+    :return: Euclidean norm
+    """
     # A good value for brightness would be 80
     if len(img.shape) == 3:
         # Colored RGB or BGR (*Do Not* use HSV images with this function)
-        # create brightness with euclidean norm
+        # create brightness with Euclidean norm
         return np.average(norm(img, axis=2)) / np.sqrt(3)
     else:
         # Grayscale
         return np.average(img)
 #print(f"brightness is: {brightness(img)}")
 def blurr_detection(img):
+    """
+
+    :param img: the image path
+    :return: the variance of the laplacian of an image
+    """
 
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
     variance_of_laplacian = cv2.Laplacian(gray, cv2.CV_64F).var()
     return variance_of_laplacian
 def resolution_measurement(img):
+    """
+
+    :param img: takes the image path as input
+    :return: returns the width and height
+    """
     wid = img.shape[1]
     hgt = img.shape[0]
     print(str(wid) + "x" + str(hgt))
@@ -35,12 +50,18 @@ def resolution_measurement(img):
 #resolution_measurement(img)
 
 def check_image_quality(image_path):
+    """
+
+    :param image_path: takes the image_path as input, to check the quality of the images
+    :return: 1 if the image passed quality check, 0 if not
+    """
     img = cv2.imread(image_path)
-    if resolution_measurement(img) == (383,524) and blurr_detection(img)>100 and brightness(img)>90:
+    #if resolution_measurement(img) == (383,524) and blurr_detection(img)>100 and brightness(img)>90:
+    if blurr_detection(img) > 100 and brightness(img) > 90:
         print("image quality is acceptable")
         return 1
     else:
         print('image was not accepted')
         return 0
-check_image_quality()
 
+check_image_quality(image_path)
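A hedged usage sketch for the quality gate above, filtering a folder of candidate images; the folder path is a placeholder and check_image_quality is assumed to be in scope:

import os

folder = 'replace with path to image folder'
# keep only the files that pass the blur and brightness checks
accepted = [f for f in os.listdir(folder)
            if check_image_quality(os.path.join(folder, f)) == 1]
print(f'{len(accepted)} images passed the quality check')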
diff --git a/functions.py b/functions.py
index db5019ecdb7c1f8b085c8a3e3e1e05b7acc76f34..4436061e048c906c84efa5fb103fc4e0a4590dad 100644
--- a/functions.py
+++ b/functions.py
@@ -5,6 +5,14 @@ from keras.applications.inception_v3 import preprocess_input
 import random
 
 def get_image(image_path, preprocessing=False, resize=False, shape= (224,224,3)):
+    """
+
+    :param image_path: Path to the image file
+    :param preprocessing: if it's set to true, preprocessing will be performed, it's by default false
+    :param resize: Set to true if resizing is needed
+    :param shape: Image size for resizing
+    :return: the 3d tensor containing the image information
+    """
 
     image_string = tf.io.read_file(image_path)
     image = tf.image.decode_jpeg(image_string, channels=3)
@@ -20,6 +28,12 @@ def preprocess_images(anchor_img, positive_img, negative_img):
 
 
 def split_dataset(directory, split=0.9):
+    """
+
+    :param directory: Path to the directory containing the dataset
+    :param split: The percentage of data that will be used in training.
+    :return: returns two dictionaries in the following shape : {'label', 'number of images corresponding to that label}
+    """
     all_files = os.listdir(directory)
     len_train = int(len(all_files) * split)
     random.shuffle(all_files)
@@ -47,7 +61,15 @@ def split_dataset(directory, split=0.9):
     return train_list, test_list
 
 def create_triplets(directory, folder_list, max_files=20):
-    "Create triplets from the generated dataset lists."
+    """
+
+    :param directory: Path to the directory of the dataset
+    :param folder_list: this is the dictionary given by the 'split dataset function'
+    :param max_files: maximum number of files that can be used for triplet creation from each label
+    :return: list of tuples, each tuple containing 3 tuples (anchor, positive, negative).
+    Tuples are in the following form : ('label', 'filename')
+    """
+    "Creates triplets from the generated dataset lists."
     triplets = []
     list_folders = list(folder_list.keys())
 
@@ -73,6 +95,12 @@ def create_triplets(directory, folder_list, max_files=20):
     return triplets
 
 def Generate_dataset(list, Path):
+    """
+
+    :param list: This is the triplet list
+    :param Path: Path to the dataset
+    :return: returns a tensorflow dataset of images.
+    """
     "this function will create a tf dataset "
     anchor_label = []
     positive_label = []
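As a sketch of the kind of pipeline Generate_dataset is described as building, the following assembles a tf.data dataset from three lists of file paths; the helper name and decoding details are assumptions rather than the repo's exact implementation:

import tensorflow as tf

def make_triplet_dataset(anchor_paths, positive_paths, negative_paths):
    ds = tf.data.Dataset.from_tensor_slices(
        (anchor_paths, positive_paths, negative_paths))

    def _read(path):
        # decode a JPEG file into a float32 tensor in [0, 1]
        image = tf.image.decode_jpeg(tf.io.read_file(path), channels=3)
        return tf.image.convert_image_dtype(image, tf.float32)

    # decode the three images of each triplet in parallel
    return ds.map(lambda a, p, n: (_read(a), _read(p), _read(n)),
                  num_parallel_calls=tf.data.AUTOTUNE)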
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e99009c71a7fee96cc3da6a745e83213c03bec67
Binary files /dev/null and b/requirements.txt differ
diff --git a/test_model/capture_image.py b/test_model/capture_image.py
index a5131aada48e94bae62eadf05ed48d2a8c5a3e93..c432df18ece39390f7b8ce6c1f11390239d5eedc 100644
--- a/test_model/capture_image.py
+++ b/test_model/capture_image.py
@@ -25,14 +25,21 @@ while True:
         #if you press space
         img_name= f"test_image_new{img_counter}.jpg"
         img = cv2.imwrite(img_name, frame)
-        image_path = directory_path + f"test_image_new{img_counter}.jpg"
-        print(image_path)
+
         print(f"image {img_name}was saved")
         img_counter+= 1
 cam.release()
 cv2.destroyAllWindows()
-print(img)
+image_path = os.path.join(directory_path, img_name)
+print(image_path)
+
 def crop_image(image_path):
+    """
+
+    :param image_path: Path to the image
+    :return: tuple containing a boolean indicating successful detection and the cropped
+    image if successful otherwise (False, None)
+    """
     detector = MTCNN()
     img = cv2.imread(image_path)
     data = detector.detect_faces(img)
@@ -59,4 +66,5 @@ if x==1 :
     print("image was accepted")
     cv2.imwrite("test_image_new.jpg", img)
 else:
     print('image was not accepted')
\ No newline at end of file
diff --git a/test_model/convert_tf_lite.py b/test_model/convert_tf_lite.py
index b4d145a20bb13e21a63ddfc630a31d889c73a86e..3f43b0412d4493dfa5adf4c2ff78ec5997549a4c 100644
--- a/test_model/convert_tf_lite.py
+++ b/test_model/convert_tf_lite.py
@@ -8,6 +8,11 @@ from keras.applications import Xception
 from keras.models import Model, Sequential
 #os.chdir(r"C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\test_model\model\encoder.keras")
 def image_embedder(input_shape):
+  """
+
+  :param input_shape: take the input shape which the CNN will expect
+  :return: The convolutional neural network that will be used in embedding.
+  """
   """ Returns the convolutional neural network that will generate the encodings of each picture """
 
   pretrained_model = Xception(
diff --git a/test_model/test_model.py b/test_model/test_model.py
index 81d432a3a59ca66a71ebd099771b12b8023846ac..f2581a4e13adb9906ec35c3e4f6b95517de61e64 100644
--- a/test_model/test_model.py
+++ b/test_model/test_model.py
@@ -13,26 +13,34 @@ from pathlib import Path
 
 
 from sklearn.metrics import accuracy_score, confusion_matrix,ConfusionMatrixDisplay
-#from sklearn.metrics.ConfusionMatrixDisplay import plot_confusion_matrix
+
 
 directory_path = os.getcwd()
 Path = os.path.join(directory_path, 'Extracted Faces')
 print(Path)
-#Path = r'C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\Dataset_tools\Extracted Faces'
-#Path =r'G:\datasets\new_dat'
 random.seed(5)
 np.random.seed(5)
 tf.random.set_seed(5)
 folders = os.listdir(Path)
 
 def read_imagee(index):
+    """
+
+    :param index: takes the  index of an image list .
+    :return:a 3D tensor of the following shape will be returned [height, width, channels]
+    """
 
     image_path = os.path.join(Path, index[0], index[1])
     image_string = tf.io.read_file(image_path)
     image = tf.image.decode_jpeg(image_string, channels=3)
     image = tf.image.convert_image_dtype(image, tf.float32)
-    #image = tf.image.resize(image, (224,224))
+    image = tf.image.resize(image, (224,224))
+    return image
 def read_image(index):
+    """
+
+    :param index: takes the index to an image that contains the filename and path.
+    :return: a 3D tensor of the following shape will be returned [height, width, channels]
+    """
 
     path = os.path.join(Path, index[0], index[1])
     image = cv2.imread(path, cv2.COLOR_BGR2RGB)
@@ -52,6 +60,13 @@ test_triplet = create_triplets(Path, test_list, max_files=2)
 print(f'this is the Triplet list  {test_triplet}')
 
 def batch_generator(triplet_list, batch_size=256, preprocessing = False):
+    """
+
+    :param triplet_list: this is list containing the tripelets
+    :param batch_size: size of the image batch
+    :param preprocessing: specifies if any preprocessing is applied
+    :return: returns a batch of images (images are in numpy array form).
+    """
     batch_step = len(triplet_list)//batch_size
 
     for i in range(batch_step+1):
@@ -76,6 +91,11 @@ def batch_generator(triplet_list, batch_size=256, preprocessing = False):
 
         yield ([anchor, positive, negative])
 def data_generator(triplet_list):
+    """
+
+    :param triplet_list: list of triplets in the following form (anchor, positive, negative)
+    :return: returns a tuple containing the images that are in numpy array form.
+    """
     batch_step = len(triplet_list)
     anchor = []
     positive = []
@@ -97,12 +117,20 @@ def data_generator(triplet_list):
 
 encoder = image_embedder((224,224,3))
 
-encoder.load_weights(r'C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\test_model\model\encoder.keras')
+encoder.load_weights(os.path.join('model', 'encoder.keras'))
 
 
 
 
 def classify_images(face_list1, face_list2, threshold=1.2):
+    """
+
+    :param face_list1: This is a list of images can be anchor, positive or negative
+    :param face_list2: this is a list of images, can be anchor , positive or negative
+    :param threshold: A value for the euclidean distance that determines if two images
+    belong to the same class.
+    :return:
+    """
     # Getting the encodings for the passed faces
     embeddings1 = encoder.predict(face_list1)
     embeddings2 = encoder.predict(face_list2)
@@ -113,6 +141,12 @@ def classify_images(face_list1, face_list2, threshold=1.2):
 
 
 def ModelMetrics(pos_list, neg_list):
+    """
+
+    :param pos_list: Takes a list containing the predictions of the model on anchor and positive samples
+    :param neg_list:Takes a list containing the predictions of the model on anchor and negative samples
+    :return: a confusion matrix
+    """
     true = np.array([0] * len(pos_list) + [1] * len(neg_list))
     pred = np.append(pos_list, neg_list)
 
diff --git a/test_model/use_model.py b/test_model/use_model.py
index 5151b0e67f02544260f4c7833c49cb6e479a381b..1347f0732202e11f161e9ae6e4a4df6c7521c374 100644
--- a/test_model/use_model.py
+++ b/test_model/use_model.py
@@ -14,10 +14,11 @@ from facial_recognition.Siamese_model import image_embedder, get_siamese_network
 
 directory_path = os.getcwd()
 database_path = os.path.join(directory_path, 'database')
+encoder_path = os.path.join(directory_path, 'encoder.keras')
 
-#parser = argparse.ArgumentParser(description='gives filename')
-#parser.add_argument('file_name', metavar='file_name', type=str, help='gives filename')
-#args = parser.parse_args()
+parser = argparse.ArgumentParser(description='Run the face recognition model on an image file')
+parser.add_argument('file_name', metavar='file_name', type=str, help='name of the image file to classify')
+args = parser.parse_args()
 
 
 def read_image(Path):
@@ -33,8 +34,15 @@ def read_image(Path):
 
 encoder = image_embedder((224,224,3))
 
-encoder.load_weights(r'C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\test_model\model\encoder.keras')
+encoder.load_weights(encoder_path)
 def classify_images(face_list1, face_list2, threshold=1.3):
+    """
+
+    :param face_list1:
+    :param face_list2:
+    :param threshold:
+    :return:
+    """
     # Getting the encodings for the passed faces
     tensor1 = encoder.predict(face_list1)
     tensor2 = encoder.predict(face_list2)
@@ -45,8 +53,8 @@ def classify_images(face_list1, face_list2, threshold=1.3):
         return 1
     else:
         return 0
-#file_name = args.file_name
-test_path = os.path.join(directory_path, 'test_image_2.jpg')
+file_name = args.file_name
+test_path = os.path.join(directory_path, file_name)
 img1 = np.array([read_image(Path=test_path)])
 result = {}
 for persons in os.listdir(database_path):
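With the argument parser enabled, the script now takes the image filename as a positional argument; a typical invocation (filename illustrative) would be: python use_model.py test_image_2.jpg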
diff --git a/test_model/use_tflite.py b/test_model/use_tflite.py
index 4c903d3be73226de22cf84bc005a7471a137cfd8..ec24d0bb2571ff23336bf71205cf236a2fb2b11e 100644
--- a/test_model/use_tflite.py
+++ b/test_model/use_tflite.py
@@ -28,6 +28,11 @@ database_path = os.path.join(directory_path, 'database')
 
 
 def read_image(Path):
+    """
+
+    :param Path: Path to the image that will be read.
+    :return: Returns a 3D tensor containing the image data in the following shape [widht,height,channels].
+    """
 
 
     image = cv2.imread(Path, cv2.COLOR_BGR2RGB)
@@ -93,6 +98,13 @@ output_data2 = interpreter.get_tensor(output_details[0]['index'])
 print(output_data2)
 print(output_data)
 def classify_images(face_list1, face_list2, threshold=1.2):
+    """
+
+    :param face_list1: a list of labeled imgaes
+    :param face_list2:
+    :param threshold:
+    :return:
+    """
     # Getting the encodings for the passed faces
 
     tensor1 = interpreter.set_tensor(input_details[0]['index'], [face_list1])
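Note that interpreter.set_tensor returns None, so assigning its result (as in the line above) does not capture a tensor; the output is read back with get_tensor after invoke. A minimal sketch of the standard TFLite invocation pattern, with the model path and input shape as assumptions:

import numpy as np
import tensorflow as tf

# load the converted model; the path is a placeholder
interpreter = tf.lite.Interpreter(model_path='replace with path to .tflite model')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# run one (batched) image through the encoder
image = np.zeros((1, 224, 224, 3), dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], image)
interpreter.invoke()
embedding = interpreter.get_tensor(output_details[0]['index'])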
diff --git a/train_model_tripletloss.py b/train_model_tripletloss.py
index 468851b96bbdbfb11093f1e3b54700999efb5b7c..091ef57804342aafdb138f078895e652840bf58b 100644
--- a/train_model_tripletloss.py
+++ b/train_model_tripletloss.py
@@ -10,7 +10,7 @@ from functions import split_dataset, Generate_dataset, create_triplets
 random.seed(5)
 np.random.seed(5)
 tf.random.set_seed(5)
-Path = "replace this with link to dataset"
+Path = "replace this with Path to dataset"
 
 physical_devices = tf.config.list_physical_devices('GPU')
 for gpu_instance in physical_devices:
@@ -23,6 +23,7 @@ train_dataset = Generate_dataset(Path=Path,list=train_triplet)
 train_dataset = train_dataset.shuffle(buffer_size=1024)
 train_dataset = train_dataset.batch(2048, drop_remainder=False)
 train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
 
 
 
@@ -34,23 +35,23 @@ strategy = tf.distribute.MirroredStrategy(gpus)
 with strategy.scope():
     emb_mod, model= get_siamese_network([224 ,224, 3])
 
-    checkpoint_path = r"/home/khlifi/Documents/model_semihard_triplet_loss/all/allweights_1024b_preprocessing/max_55.keras"
+    checkpoint_path = 'replace with path to checkpoint'
     model.compile(optimizer=tf.keras.optimizers.Adam(0.0001),loss=tfa.losses.TripletSemiHardLoss(margin=0.3))
     cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                      save_weights_only=True,
                                               verbose=1)
     history = model.fit(train_dataset, epochs=15, callbacks=[cp_callback])
     hist_df = pd.DataFrame(history.history)
-    hist_json_file = '/home/khlifi/Documents/more_data_preprocessing_on/all/history_2048_50.json'
+    hist_json_file = 'replace with path to history JSON file'
     with open(hist_json_file, mode='w') as f:
         hist_df.to_json(f)
-    hist_csv_file = '/home/khlifi/Documents/more_data_preprocessing_on/all/history_2048_50.csv'
+    hist_csv_file = 'replace with path to history CSV file'
     with open(hist_csv_file, mode='w') as f:
         hist_df.to_csv(f)
 
 
 
-model.save('/home/khlifi/Documents/model_semihard_triplet_loss/final/2048_batch_pre_on_max_50.keras')
+model.save('replace with path to the saved model file')
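To reuse the trained weights later, a hedged sketch of rebuilding the network and restoring the checkpoint written by the ModelCheckpoint callback above; the import path is assumed from the repo layout and the checkpoint location is a placeholder:

from Siamese_model import get_siamese_network

# rebuild the architecture, then load the checkpointed weights
encoder, siamese = get_siamese_network([224, 224, 3])
siamese.load_weights('replace with path to checkpoint')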