diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..26d33521af10bcc7fd8cea344038eaaeb78d0ef5
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml
diff --git a/.idea/facial_recognition.iml b/.idea/facial_recognition.iml
new file mode 100644
index 0000000000000000000000000000000000000000..79b1fd80baf32bb9a0c8cbf315086c046608e675
--- /dev/null
+++ b/.idea/facial_recognition.iml
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$">
+      <excludeFolder url="file://$MODULE_DIR$/venv" />
+    </content>
+    <orderEntry type="jdk" jdkName="Python 3.6" jdkType="Python SDK" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000000000000000000000000000000000000..105ce2da2d6447d11dfe32bfb846c3d5b199fc99
--- /dev/null
+++ b/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000000000000000000000000000000000000..65531ca992813bbfedbe43dfae5a5f4337168ed8
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.6" project-jdk-type="Python SDK" />
+</project>
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000000000000000000000000000000000000..7c95aff1721671a28893cabe47ccf6fe2cf3840a
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/facial_recognition.iml" filepath="$PROJECT_DIR$/.idea/facial_recognition.iml" />
+    </modules>
+  </component>
+</project>
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000000000000000000000000000000000000..94a25f7f4cb416c083d265558da75d457237d671
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="$PROJECT_DIR$" vcs="Git" />
+  </component>
+</project>
\ No newline at end of file
diff --git a/Dataset_tools/Rename_vgg_face_dataset.py b/Dataset_tools/Rename_vgg_face_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..0bad9d6ce98e4363e4178594e22df2f3c285cc7b
--- /dev/null
+++ b/Dataset_tools/Rename_vgg_face_dataset.py
@@ -0,0 +1,19 @@
+import os
+
+# Rename every image inside each identity folder to a sequential "<i>.jpg" name.
+#Path = "/home/khlifi/Documents/test_vgg"
+Path = "/home/khlifi/Documents/test_bgg"
+Folders = os.listdir(Path)
+for folders in Folders:
+    print(folders)
+    new_path = os.path.join(Path, folders)
+    list_of_files = os.listdir(new_path)
+    # Note: renaming in place can clobber files already named "<i>.jpg";
+    # it is safer to run this on a copy of the dataset.
+    for i, filename in enumerate(list_of_files):
+        old_path = os.path.join(Path, folders, filename)
+        new_path = os.path.join(Path, folders, str(i) + ".jpg")
+        os.rename(old_path, new_path)
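+
+# Expected result (folder names are hypothetical): each identity folder such as
+# "n000001" ends up holding 0.jpg, 1.jpg, ... in directory-listing order.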
diff --git a/Dataset_tools/Split_celeba_in_folders.py b/Dataset_tools/Split_celeba_in_folders.py
new file mode 100644
index 0000000000000000000000000000000000000000..275ea779f8cedfc3ff3ee02f3cd4f3cd8645463c
--- /dev/null
+++ b/Dataset_tools/Split_celeba_in_folders.py
@@ -0,0 +1,55 @@
+import csv
+import os
+import shutil
+
+
+def Read_Two_Column_File(file_path):
+    """
+    Read a space-separated two-column identity file.
+
+    :param file_path: Path to the identity file.
+    :return: Two lists: x contains the filenames and y contains the label
+        corresponding to each filename.
+    """
+    with open(file_path, 'r') as f_input:
+        csv_input = csv.reader(f_input, delimiter=' ', skipinitialspace=True)
+        x = []
+        y = []
+        for cols in csv_input:
+            x.append(cols[0])
+            y.append(cols[1])
+
+    return x, y
+
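+# identity_CelebA.txt maps each image to a numeric identity, one pair per
+# line, for example:
+#   000001.jpg 2880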
+x, y = Read_Two_Column_File(r'/home/khlifi/Downloads/identity_CelebA.txt')
+
+path = 'destination path'
+src_folder = 'source folder path'
+for i in range(len(x)):
+    b = 0
+    for j in range(len(x)):
+        # Group every file that shares identity y[i] into one folder,
+        # numbering the copies 0.jpg, 1.jpg, ...
+        if y[i] == y[j]:
+            Path = os.path.join(path, y[j])
+            src_path = os.path.join(src_folder, x[j])
+            dest_path = os.path.join(Path, str(b)+".jpg")
+
+            if not os.path.isdir(Path) and not os.path.isfile(dest_path) and os.path.isfile(src_path):
+                print(f"Creating the new '{Path}' directory.")
+                os.mkdir(Path)
+                print(src_path)
+                print(dest_path)
+
+
+                shutil.copy(src_path, dest_path)
+                b += 1
+
+            elif os.path.isdir(Path) and not os.path.isfile(dest_path) and os.path.isfile(src_path):
+                shutil.copy(src_path, dest_path)
+
+
+                b += 1
diff --git a/Dataset_tools/crop_celeba.py b/Dataset_tools/crop_celeba.py
new file mode 100644
index 0000000000000000000000000000000000000000..aaf9d0eae2dd68faed86d266b25bc73d2d410ea0
--- /dev/null
+++ b/Dataset_tools/crop_celeba.py
@@ -0,0 +1,51 @@
+from mtcnn import MTCNN
+import cv2
+import os
+
+
+def crop_image(image_path):
+    """
+    Detect faces with MTCNN and crop the largest one.
+
+    :param image_path: Path to the input image.
+    :return: (True, cropped_face) if a face was detected, (False, None) otherwise.
+    """
+    detector = MTCNN()
+    img = cv2.imread(image_path)
+    data = detector.detect_faces(img)
+    biggest = 0
+    if data != []:
+        for faces in data:
+            box = faces['box']
+            # calculate the area of the bounding box in the image
+            area = box[3] * box[2]
+            # keep the largest detected face
+            if area > biggest:
+                biggest = area
+                bounding_box = box
+        # Ensure bounding box coordinates are non-negative
+        bounding_box[0] = max(bounding_box[0], 0)
+        bounding_box[1] = max(bounding_box[1], 0)
+        # crop the face region from the image
+        cropped_img = img[bounding_box[1]:bounding_box[1] + bounding_box[3], bounding_box[0]:bounding_box[0] + bounding_box[2]]
+        cropped_face = cv2.cvtColor(cropped_img, cv2.COLOR_BGR2RGB) # convert from bgr to rgb
+        return (True, cropped_face)
+    else:
+        return (False, None)
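+# Hypothetical usage: ok, face = crop_image("/path/to/photo.jpg"); when ok is
+# True, face holds an RGB crop of the largest detected face.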
+#path = r"G:\datasets\celeb-A\img_align_celeba\000060.jpg"
+Path2 = 'replace with source path'
+dst_path= 'replace with destination path'
+all_files = os.listdir(Path2)
+for files in all_files:
+    img_path = os.path.join(Path2, files)
+    dst_path3 = os.path.join(dst_path, files)
+    if not os.path.isfile(dst_path3):
+        result, images = crop_image(img_path)
+        print(files)
+        print(img_path)
+        if result:
+            images = cv2.cvtColor(images, cv2.COLOR_RGB2BGR)  # back to BGR for imwrite
+            cv2.imwrite(dst_path3, images)
+
diff --git a/Dataset_tools/crop_vggface.py b/Dataset_tools/crop_vggface.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f22c40cc5df5dd2d7f901da37c770c12e16e7af
--- /dev/null
+++ b/Dataset_tools/crop_vggface.py
@@ -0,0 +1,56 @@
+from mtcnn import MTCNN
+import cv2
+import os
+def crop_image(image_path):
+    """
+    Detect faces with MTCNN and crop the largest one.
+
+    :param image_path: Path to the input image.
+    :return: (True, cropped_face) if a face was detected, (False, None) otherwise.
+    """
+    detector = MTCNN()
+    img = cv2.imread(image_path)
+    data = detector.detect_faces(img)
+    biggest = 0
+    if data != []:
+        for faces in data:
+            box = faces['box']
+            # calculate the area of the bounding box in the image
+            area = box[3] * box[2]
+            # keep the largest detected face
+            if area > biggest:
+                biggest = area
+                bounding_box = box
+        # Ensure bounding box coordinates are non-negative
+        bounding_box[0] = max(bounding_box[0], 0)
+        bounding_box[1] = max(bounding_box[1], 0)
+        # crop the face region from the image
+        cropped_img = img[bounding_box[1]:bounding_box[1] + bounding_box[3], bounding_box[0]:bounding_box[0] + bounding_box[2]]
+        cropped_face = cv2.cvtColor(cropped_img, cv2.COLOR_BGR2RGB) # convert from bgr to rgb
+        return (True, cropped_face)
+    else:
+        return (False, None)
+
+Path2 = 'replace with source directory'
+dst_path = 'replace with destination directory'
+all_folders = os.listdir(Path2)
+result = False
+
+for folders in all_folders:
+    c=0
+    folders_path = os.path.join(Path2, folders)
+    files_list = os.listdir(folders_path)
+    new_folder_path = os.path.join(dst_path, folders)
+    if not os.path.exists(new_folder_path):
+        os.mkdir(new_folder_path)
+    for i in range(len(files_list)):
+        img_path = os.path.join(folders_path, files_list[i])
+        dst_path3 = os.path.join(new_folder_path, str(c) + ".jpg")
+        if not os.path.isfile(dst_path3):
+            result, images = crop_image(img_path)
+            if result:
+                images = cv2.cvtColor(images, cv2.COLOR_RGB2BGR)  # back to BGR for imwrite
+                cv2.imwrite(dst_path3, images)
+                c += 1
+            else:
+                continue
\ No newline at end of file
diff --git a/Siamese_model.py b/Siamese_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..75370a2828024ff2c36d02a795c3f2ba08821ecf
--- /dev/null
+++ b/Siamese_model.py
@@ -0,0 +1,72 @@
+import tensorflow as tf
+
+from keras import layers
+from keras.applications import Xception
+from keras.models import Sequential
+
+
+def image_embedder(input_shape):
+    """
+    Create the CNN that generates the image embeddings.
+
+    All layers of the pretrained backbone except the last 27 are frozen.
+
+    :param input_shape: Shape of the expected input.
+    :return: the encoder model.
+    """
+
+    pretrained_model = Xception(
+        input_shape=input_shape,
+        weights='imagenet',
+        include_top=False,
+        pooling='avg',
+    )
+    for i in range(len(pretrained_model.layers)-27):
+        pretrained_model.layers[i].trainable = False
+
+    encode_model = Sequential([
+        pretrained_model,
+        layers.Flatten(),
+        layers.Dense(512, activation='relu'),
+        layers.BatchNormalization(),
+        layers.Dense(256, activation="relu"),
+        layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1))
+    ], name="Encode_Model")
+    return encode_model
+
+
+
+def get_siamese_network(input_shape):
+    """
+
+    :param input_shape: shape of the input expected by the network.
+    :return: the encoder and the siamese network.
+    """
+    encoder = image_embedder(input_shape)
+
+    # Define the input layers of the model for the inputs
+    anchor_input = layers.Input(input_shape, name="Anchor_Input")
+    positive_input = layers.Input(input_shape, name="Positive_Input")
+    negative_input = layers.Input(input_shape, name="Negative_Input")
+
+    # Here the embeddings will be generated
+    encoded_a = encoder(anchor_input)
+    encoded_p = encoder(positive_input)
+    encoded_n = encoder(negative_input)
+    inputs = [anchor_input, positive_input, negative_input]
+    outputs = [encoded_a, encoded_p, encoded_n]
+
+    # Connect the inputs with the outputs
+    siamese_triplet = tf.keras.Model(inputs=inputs, outputs=outputs)
+
+    # return the model
+    return encoder, siamese_triplet
+
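+# Hypothetical usage: encoder, siamese_net = get_siamese_network((224, 224, 3)).
+# The three branches share the encoder's weights, so a single encoder.predict()
+# call is enough to embed any face at inference time.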
+if __name__ == '__main__':
+
+    print('Siamese_model is running directly from original file')
+else:
+    print('Siamese_model is running from import')
\ No newline at end of file
diff --git a/face_image_quality/face_image_quality.py b/face_image_quality/face_image_quality.py
new file mode 100644
index 0000000000000000000000000000000000000000..204654822966254b1beea3f8c21f6fbaa021f1ab
--- /dev/null
+++ b/face_image_quality/face_image_quality.py
@@ -0,0 +1,69 @@
+import cv2
+
+import numpy as np
+import os
+from numpy.linalg import norm
+
+
+
+image_path = r'C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\test_model\database\1\0.jpg'
+
+image = cv2.imread(image_path)
+#gray = cv2.imread(r'/home/khlifi/Documents/test_bgg/n000017/53.jpg', 0)
+
+
+def brightness(img):
+    """
+
+    :param img: Image as a NumPy array (BGR or grayscale), not a path.
+    :return: average per-pixel brightness.
+    """
+    # A good value for brightness would be 80
+    if len(img.shape) == 3:
+        # Colored RGB or BGR (*Do Not* use HSV images with this function)
+        # create brightness with Euclidean norm
+        return np.average(norm(img, axis=2)) / np.sqrt(3)
+    else:
+        # Grayscale
+        return np.average(img)
+#print(f"brightness is: {brightness(img)}")
+def blurr_detection(img):
+    """
+
+    :param img: the image as a NumPy array (BGR).
+    :return: the variance of the Laplacian of the image.
+    """
+
+    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    variance_of_laplacian = cv2.Laplacian(gray, cv2.CV_64F).var()
+    return variance_of_laplacian
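+# A common heuristic (assumed here): a Laplacian variance below roughly 100
+# suggests a blurry image, which matches the threshold used further down.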
+def resolution_measurement(img):
+    """
+
+    :param img: the image as a NumPy array.
+    :return: returns the width and height
+    """
+    wid = img.shape[1]
+    hgt = img.shape[0]
+    print(str(wid) + "x" + str(hgt))
+    return (wid, hgt)
+#resolution_measurement(img)
+
+def check_image_quality(image_path):
+    """
+
+    :param image_path: takes the image_path as input, to check the quality of the images
+    :return: 1 if the image passed quality check, 0 if not
+    """
+    img = cv2.imread(image_path)
+    #if resolution_measurement(img) == (383,524) and blurr_detection(img)>100 and brightness(img)>90:
+    if blurr_detection(img) > 100 and brightness(img) > 90:
+        print("image quality is acceptable")
+        return 1
+    else:
+        print('image was not accepted')
+        return 0
+
+check_image_quality(image_path)
diff --git a/functions.py b/functions.py
new file mode 100644
index 0000000000000000000000000000000000000000..4436061e048c906c84efa5fb103fc4e0a4590dad
--- /dev/null
+++ b/functions.py
@@ -0,0 +1,153 @@
+import os
+import random
+
+import tensorflow as tf
+from keras.applications.inception_v3 import preprocess_input
+
+def get_image(image_path, preprocessing=False, resize=False, shape=(224, 224, 3)):
+    """
+
+    :param image_path: Path to the image file
+    :param preprocessing: if it's set to true, preprocessing will be performed, it's by default false
+    :param resize: Set to true if resizing is needed
+    :param shape: Image size for resizing
+    :return: the 3d tensor containing the image information
+    """
+
+    image_string = tf.io.read_file(image_path)
+    image = tf.image.decode_jpeg(image_string, channels=3)
+    image = tf.image.convert_image_dtype(image, tf.float32)
+    if resize:
+        image = tf.image.resize(image, shape[:2])
+    if preprocessing:
+        # preprocess_input scales the input to (0, 1) or (-1, 1) depending on the CNN
+        image = preprocess_input(image)
+    return image
+def preprocess_images(anchor_img, positive_img, negative_img):
+    return (preprocess_input(get_image(anchor_img)),
+            preprocess_input(get_image(positive_img)),
+            preprocess_input(get_image(negative_img)))
+
+
+def split_dataset(directory, split=0.9):
+    """
+
+    :param directory: Path to the directory containing the dataset
+    :param split: The fraction of the data used for training.
+    :return: two dictionaries mapping each label to its number of images.
+    """
+    all_files = os.listdir(directory)
+    len_train = int(len(all_files) * split)
+    random.shuffle(all_files)
+    train_list, test_list = {}, {}
+    # Skip entries that are not directories, to avoid a NotADirectoryError.
+    try:
+        for folder in all_files[:len_train]:
+            path_train= os.path.join(directory, folder)
+            if os.path.isdir(path_train):
+                num_train = len(os.listdir(path_train))
+                train_list[folder] = num_train
+            else:
+                continue
+
+        for folder in all_files[len_train:]:
+            path_test = os.path.join(directory, folder)
+            if os.path.isdir(path_test):
+
+                num_test = len(os.listdir(path_test))
+                test_list[folder] = num_test
+            else:
+                continue
+    except NotADirectoryError:
+        print('No directory found')
+    return train_list, test_list
+
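+# Example of the returned shape (labels are hypothetical):
+#   train_list == {'n000001': 55, 'n000002': 43, ...}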
+def create_triplets(directory, folder_list, max_files=20):
+    """
+
+    :param directory: Path to the directory of the dataset
+    :param folder_list: this is the dictionary given by the 'split dataset function'
+    :param max_files: maximum number of files that can be used for triplet creation from each label
+    :return: list of tuples, each tuple containing 3 tuples (anchor, positive, negative).
+    Each inner tuple has the form ('label', 'filename').
+    """
+    # Creates triplets from the generated dataset lists.
+    triplets = []
+    list_folders = list(folder_list.keys())
+
+    for folder in list_folders:
+        path = os.path.join(directory, folder)
+        files = list(os.listdir(path))[:max_files]
+        num_files = len(files)
+
+        for i in range(num_files - 1):
+            for j in range(i + 1, num_files):
+                anchor = (folder, f"{i}.jpg")
+                positive = (folder, f"{j}.jpg")
+
+                neg_folder = folder
+                while neg_folder == folder:
+                    neg_folder = random.choice(list_folders)
+                neg_file = random.randint(0, folder_list[neg_folder] - 1)
+                negative = (neg_folder, f"{neg_file}.jpg")
+
+                triplets.append((anchor, positive, negative))
+
+    random.shuffle(triplets)
+    return triplets
+
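+# Example triplet (labels are hypothetical):
+#   (('n000001', '0.jpg'), ('n000001', '3.jpg'), ('n000042', '7.jpg'))
+# pairs two images of one person with an image of a different person.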
+def Generate_dataset(triplet_list, Path):
+    """
+    Build a tf.data dataset of triplet images.
+
+    :param triplet_list: the triplet list produced by create_triplets.
+    :param Path: Path to the dataset.
+    :return: a TensorFlow dataset of (images, labels) triplets.
+    """
+    anchor_label = []
+    positive_label = []
+    negative_label = []
+    positive_image_path = []
+    anchor_image_path = []
+    negative_image_path = []
+    for i in range(len(triplet_list)):
+
+        a, p, n = triplet_list[i]
+        anchor_path = os.path.join(Path, a[0], a[1])
+        positive_path = os.path.join(Path, p[0], p[1])
+        negative_path = os.path.join(Path, n[0], n[1])
+        anchor_label.append(a[0])
+        positive_label.append(p[0])
+        negative_label.append(n[0])
+        anchor_image_path.append(anchor_path)
+        positive_image_path.append(positive_path)
+        negative_image_path.append(negative_path)
+
+    anchor_dataset = tf.data.Dataset.from_tensor_slices(anchor_image_path)
+    anchor_dataset = anchor_dataset.map(get_image)
+
+    positive_dataset = tf.data.Dataset.from_tensor_slices(positive_image_path)
+    positive_dataset = positive_dataset.map(get_image)
+
+    negative_dataset = tf.data.Dataset.from_tensor_slices(negative_image_path)
+    negative_dataset = negative_dataset.map(get_image)
+
+    image_labels = tf.data.Dataset.zip((
+        tf.data.Dataset.from_tensor_slices(anchor_label),
+        tf.data.Dataset.from_tensor_slices(positive_label),
+        tf.data.Dataset.from_tensor_slices(negative_label)))
+    image_dataset = tf.data.Dataset.zip((anchor_dataset, positive_dataset, negative_dataset))
+    dataset = tf.data.Dataset.zip((image_dataset, image_labels))
+    return dataset
+
+
+if __name__ == '__main__':
+
+    print('function is running directly from original file')
+else:
+    print('functions is running from import')
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e99009c71a7fee96cc3da6a745e83213c03bec67
Binary files /dev/null and b/requirements.txt differ
diff --git a/test_model/capture_image.py b/test_model/capture_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..c432df18ece39390f7b8ce6c1f11390239d5eedc
--- /dev/null
+++ b/test_model/capture_image.py
@@ -0,0 +1,73 @@
+import os
+
+import cv2
+from mtcnn import MTCNN
+from facial_recognition.face_image_quality.face_image_quality import check_image_quality
+
+
+directory_path = os.getcwd()
+cam = cv2.VideoCapture(0)
+
+cv2.namedWindow("test")
+img_counter = 0
+img_name = None  # set once an image is captured with the space key
+while True:
+    result, frame = cam.read()
+    if not result:
+        print("failed to access frame")
+        break
+    cv2.imshow("test", frame)
+    k = cv2.waitKey(1)
+    if k % 256 == 27:
+        # ESC pressed
+        print("ESC pressed, closing")
+        break
+    elif k % 256 == 32:
+        # space pressed
+        img_name = f"test_image_new{img_counter}.jpg"
+        cv2.imwrite(img_name, frame)
+
+        print(f"image {img_name} was saved")
+        img_counter += 1
+cam.release()
+cv2.destroyAllWindows()
+if img_name is None:
+    raise SystemExit("no image was captured")
+image_path = os.path.join(directory_path, img_name)
+print(image_path)
+
+def crop_image(image_path):
+    """
+    Detect faces with MTCNN and crop the largest one.
+
+    :param image_path: Path to the image.
+    :return: (True, cropped_face) if a face was detected, (False, None) otherwise.
+    """
+    detector = MTCNN()
+    img = cv2.imread(image_path)
+    data = detector.detect_faces(img)
+    biggest = 0
+    if data != []:
+        for faces in data:
+            box = faces['box']
+            # calculate the area in the image
+            area = box[3] * box[2]
+            if area > biggest:
+                biggest = area
+                bbox = box
+        bbox[0] = 0 if bbox[0] < 0 else bbox[0]
+        bbox[1] = 0 if bbox[1] < 0 else bbox[1]
+        img = img[bbox[1]: bbox[1] + bbox[3], bbox[0]: bbox[0] + bbox[2]]
+        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # convert from bgr to rgb
+        return (True, img)
+    else:
+        return (False, None)
+success, img = crop_image(image_path)
+
+x = check_image_quality(image_path)
+if success and x == 1:
+    print("image was accepted")
+    # crop_image returns RGB; convert back to BGR before writing with OpenCV
+    cv2.imwrite("test_image_new.jpg", cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
+else:
+    print('image was not accepted')
\ No newline at end of file
diff --git a/test_model/convert_tf_lite.py b/test_model/convert_tf_lite.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f43b0412d4493dfa5adf4c2ff78ec5997549a4c
--- /dev/null
+++ b/test_model/convert_tf_lite.py
@@ -0,0 +1,50 @@
+import tensorflow as tf
+
+from keras import layers
+from keras.applications import Xception
+from keras.models import Sequential
+
+
+#os.chdir(r"C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\test_model\model\encoder.keras")
+def image_embedder(input_shape):
+  """
+
+  :param input_shape: take the input shape which the CNN will expect
+  :return: The convolutional neural network that will be used in embedding.
+  """
+  """ Returns the convolutional neural network that will generate the encodings of each picture """
+
+  pretrained_model = Xception(
+    input_shape=input_shape,
+    weights='imagenet',
+    include_top=False,
+    pooling='avg',
+  )
+
+  for i in range(len(pretrained_model.layers) - 27):
+    pretrained_model.layers[i].trainable = False
+
+  encode_model = Sequential([
+    pretrained_model,
+    layers.Flatten(),
+    layers.Dense(512, activation='relu'),
+    layers.BatchNormalization(),
+    layers.Dense(256, activation="relu"),
+    layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1))
+  ], name="Encode_Model")
+  return encode_model
+
+model = image_embedder((224,224,3))
+model.load_weights(r'C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\test_model\model\encoder.keras')
+# Convert the model directly from the in-memory Keras model
+converter = tf.lite.TFLiteConverter.from_keras_model(model)
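+# Optimize.DEFAULT enables post-training quantization, shrinking the model at
+# a small potential cost in accuracy.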
+converter.optimizations = [tf.lite.Optimize.DEFAULT]
+
+
+tflite_model = converter.convert()
+
+# Save the model.
+with open('model/tf_lite_optimized_for_size.tflite', 'wb') as f:
+  f.write(tflite_model)
\ No newline at end of file
diff --git a/test_model/database/1/0.jpg b/test_model/database/1/0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4b4992f7ed74c04f69827f2234823c4006d73b74
Binary files /dev/null and b/test_model/database/1/0.jpg differ
diff --git a/test_model/database/1/1.jpg b/test_model/database/1/1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..62d0971e27705e925418c691a623639de083897b
Binary files /dev/null and b/test_model/database/1/1.jpg differ
diff --git a/test_model/database/1/10.jpg b/test_model/database/1/10.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a95f5ad4ebf1bc31a29b1bb5f1e1af2816fadbf3
Binary files /dev/null and b/test_model/database/1/10.jpg differ
diff --git a/test_model/database/1/19.jpg b/test_model/database/1/19.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8d38e66f3b46635bd3c5b804a25a5fe1087a61a6
Binary files /dev/null and b/test_model/database/1/19.jpg differ
diff --git a/test_model/database/1/2.jpg b/test_model/database/1/2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6b542fe2fc2d31a05673236f828818c1c8fe811c
Binary files /dev/null and b/test_model/database/1/2.jpg differ
diff --git a/test_model/database/1/20.jpg b/test_model/database/1/20.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9b48a6cc82133e78b90aad3690d818d5807dc851
Binary files /dev/null and b/test_model/database/1/20.jpg differ
diff --git a/test_model/database/1/21.jpg b/test_model/database/1/21.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c6745db1658de6a1298b53c1875b28c39bea4ddc
Binary files /dev/null and b/test_model/database/1/21.jpg differ
diff --git a/test_model/database/1/3.jpg b/test_model/database/1/3.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b801e55c79fc9fe3c73645e910709e86079be013
Binary files /dev/null and b/test_model/database/1/3.jpg differ
diff --git a/test_model/database/1/4.jpg b/test_model/database/1/4.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5dcf17fc90dacd034db95df1fc31d219166fadd0
Binary files /dev/null and b/test_model/database/1/4.jpg differ
diff --git a/test_model/database/1/5.jpg b/test_model/database/1/5.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..aa9bcca3add88c0711d6bd612483ce1359dd7ae4
Binary files /dev/null and b/test_model/database/1/5.jpg differ
diff --git a/test_model/database/1/6.jpg b/test_model/database/1/6.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..eacba3b01acb8dbb4b99360b7421e4edb67b3b0d
Binary files /dev/null and b/test_model/database/1/6.jpg differ
diff --git a/test_model/database/1/7.jpg b/test_model/database/1/7.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..bb341672379e7bd9eb66f80ca0b0ad83c1602dd9
Binary files /dev/null and b/test_model/database/1/7.jpg differ
diff --git a/test_model/database/1/8.jpg b/test_model/database/1/8.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d17fa6e0f61a63e8b734ca436a1ef375983064da
Binary files /dev/null and b/test_model/database/1/8.jpg differ
diff --git a/test_model/database/1/9.jpg b/test_model/database/1/9.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..af79fe8f0fd3e915cb75526c8a40c3628cbb77be
Binary files /dev/null and b/test_model/database/1/9.jpg differ
diff --git a/test_model/database/2/0.jpg b/test_model/database/2/0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7644280c72d393f815d2a35683f2d35b83a007a0
Binary files /dev/null and b/test_model/database/2/0.jpg differ
diff --git a/test_model/database/2/1.jpg b/test_model/database/2/1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0a9c68387614aff202ceac42ff7e98586640a6d0
Binary files /dev/null and b/test_model/database/2/1.jpg differ
diff --git a/test_model/database/2/2.jpg b/test_model/database/2/2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..868abee9cb185da4b92ca78188b52638221aee5f
Binary files /dev/null and b/test_model/database/2/2.jpg differ
diff --git a/test_model/database/2/3.jpg b/test_model/database/2/3.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..cf8674362678ef651cdc2c32aa9aa79ff6139604
Binary files /dev/null and b/test_model/database/2/3.jpg differ
diff --git a/test_model/database/2/4.jpg b/test_model/database/2/4.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0c6a3ad4feef3cb0bde89e06d3265f954f913b48
Binary files /dev/null and b/test_model/database/2/4.jpg differ
diff --git a/test_model/database/2/5.jpg b/test_model/database/2/5.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..93d8c7988d859dc2e95e3dd138e03b4d27977af3
Binary files /dev/null and b/test_model/database/2/5.jpg differ
diff --git a/test_model/database/2/6.jpg b/test_model/database/2/6.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d303b4019ffb6acadb55643e5d726d982b637aa5
Binary files /dev/null and b/test_model/database/2/6.jpg differ
diff --git a/test_model/database/2/7.jpg b/test_model/database/2/7.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7a42c15a0affaf7713fc74dfc6265aa17c2fd5f8
Binary files /dev/null and b/test_model/database/2/7.jpg differ
diff --git a/test_model/database/2/8.jpg b/test_model/database/2/8.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6a28e0032604b1f7a5da0783bc2895bba45e179b
Binary files /dev/null and b/test_model/database/2/8.jpg differ
diff --git a/test_model/model/2048_batch_pre_on_max_50_12_937%.keras b/test_model/model/2048_batch_pre_on_max_50_12_937%.keras
new file mode 100644
index 0000000000000000000000000000000000000000..440dc7dccd9cb7fbd7d55e244fbc37bdc327ef6d
Binary files /dev/null and b/test_model/model/2048_batch_pre_on_max_50_12_937%.keras differ
diff --git a/test_model/model/encoder.keras b/test_model/model/encoder.keras
new file mode 100644
index 0000000000000000000000000000000000000000..6bea224de1770e9077958122fe611aff37536a2d
Binary files /dev/null and b/test_model/model/encoder.keras differ
diff --git a/test_model/model/model.tflite b/test_model/model/model.tflite
new file mode 100644
index 0000000000000000000000000000000000000000..68bd5d489cab35a5feacbb44b4f461476b902f45
Binary files /dev/null and b/test_model/model/model.tflite differ
diff --git a/test_model/model/tf_lite_model.tflite b/test_model/model/tf_lite_model.tflite
new file mode 100644
index 0000000000000000000000000000000000000000..8b7777bd63733b4d587f35e731ac117d532b6523
Binary files /dev/null and b/test_model/model/tf_lite_model.tflite differ
diff --git a/test_model/model/tf_lite_optimized_for_size.tflite b/test_model/model/tf_lite_optimized_for_size.tflite
new file mode 100644
index 0000000000000000000000000000000000000000..8b7777bd63733b4d587f35e731ac117d532b6523
Binary files /dev/null and b/test_model/model/tf_lite_optimized_for_size.tflite differ
diff --git a/test_model/model_complete.keras b/test_model/model_complete.keras
new file mode 100644
index 0000000000000000000000000000000000000000..c5541bf6f612cc69f5ca6e99661b306576638a3c
Binary files /dev/null and b/test_model/model_complete.keras differ
diff --git a/test_model/test_image_1.jpg b/test_model/test_image_1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..368aad1000b600b99c38674b4210b589a8604911
Binary files /dev/null and b/test_model/test_image_1.jpg differ
diff --git a/test_model/test_image_2.jpg b/test_model/test_image_2.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0a9c68387614aff202ceac42ff7e98586640a6d0
Binary files /dev/null and b/test_model/test_image_2.jpg differ
diff --git a/test_model/test_model.py b/test_model/test_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2581a4e13adb9906ec35c3e4f6b95517de61e64
--- /dev/null
+++ b/test_model/test_model.py
@@ -0,0 +1,190 @@
+import tensorflow as tf
+import os
+import random
+import numpy as np
+import cv2
+
+from facial_recognition.Siamese_model import image_embedder
+from facial_recognition.functions import split_dataset,create_triplets
+import seaborn as sns
+import matplotlib.pyplot as plt
+from keras.applications.inception_v3 import preprocess_input
+
+from sklearn.metrics import accuracy_score, confusion_matrix
+
+
+directory_path = os.getcwd()
+Path = os.path.join(directory_path, 'Extracted Faces')
+print(Path)
+random.seed(5)
+np.random.seed(5)
+tf.random.set_seed(5)
+folders = os.listdir(Path)
+
+def read_imagee(index):
+    """
+
+    :param index: takes the  index of an image list .
+    :return:a 3D tensor of the following shape will be returned [height, width, channels]
+    """
+
+    image_path = os.path.join(Path, index[0], index[1])
+    image_string = tf.io.read_file(image_path)
+    image = tf.image.decode_jpeg(image_string, channels=3)
+    image = tf.image.convert_image_dtype(image, tf.float32)
+    image = tf.image.resize(image, (224, 224))
+    return image
+def read_image(index):
+    """
+
+    :param index: takes the index to an image that contains the filename and path.
+    :return: a 3D tensor of the following shape will be returned [height, width, channels]
+    """
+
+    path = os.path.join(Path, index[0], index[1])
+    # cv2.imread's second argument is an imread flag, so convert explicitly
+    image = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
+    image = tf.image.resize(image, (224, 224))
+
+    return image
+
+
+
+
+train_list, test_list = split_dataset(Path, 0.8)
+print(f'this is the {test_list}')
+
+
+test_triplet = create_triplets(Path, test_list, max_files=2)
+print(f'this is the Triplet list  {test_triplet}')
+
+def batch_generator(triplet_list, batch_size=256, preprocessing=False):
+    """
+
+    :param triplet_list: the list containing the triplets
+    :param batch_size: size of the image batch
+    :param preprocessing: specifies if any preprocessing is applied
+    :return: yields batches of images (as numpy arrays).
+    """
+    batch_step = len(triplet_list)//batch_size
+
+    for i in range(batch_step+1):
+        anchor = []
+        positive = []
+        negative = []
+        j = i*batch_size
+        while j < (i+1)*batch_size and j < len(triplet_list):
+            a, p, n = triplet_list[j]
+            anchor.append(read_image(a))
+            positive.append(read_image(p))
+            negative.append(read_image(n))
+            j+=1
+        anchor = np.array(anchor, dtype="float")
+        positive = np.array(positive, dtype="float")
+        negative = np.array(negative, dtype="float")
+
+        if preprocessing:
+            anchor = preprocess_input(anchor)
+            positive = preprocess_input(positive)
+            negative = preprocess_input(negative)
+
+        yield ([anchor, positive, negative])
+def data_generator(triplet_list):
+    """
+
+    :param triplet_list: list of triplets in the following form (anchor, positive, negative)
+    :return: returns a tuple containing the images that are in numpy array form.
+    """
+    batch_step = len(triplet_list)
+    anchor = []
+    positive = []
+    negative = []
+    for i in range(batch_step):
+
+        a, p, n = triplet_list[i]
+        anchor.append(read_image(a))
+        positive.append(read_image(p))
+        negative.append(read_image(n))
+    anchor = np.array(anchor, dtype="float")
+    positive = np.array(positive, dtype="float")
+    negative = np.array(negative, dtype="float")
+
+
+
+    yield ([anchor, positive, negative])
+
+
+encoder = image_embedder((224,224,3))
+
+encoder.load_weights(os.path.join('model', 'encoder.keras'))
+
+
+
+
+def classify_images(face_list1, face_list2, threshold=1.2):
+    """
+
+    :param face_list1: This is a list of images can be anchor, positive or negative
+    :param face_list2: this is a list of images, can be anchor , positive or negative
+    :param threshold: A value for the euclidean distance that determines if two images
+    belong to the same class.
+    :return: array with 0 where a pair is judged the same person, 1 otherwise.
+    """
+    # Getting the encodings for the passed faces
+    embeddings1 = encoder.predict(face_list1)
+    embeddings2 = encoder.predict(face_list2)
+
+    distance = np.sum(np.square(embeddings1 - embeddings2), axis=-1)
+    prediction = np.where(distance <= threshold, 0, 1)
+    return prediction
+
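+# Note: a prediction of 0 means "same identity" (distance at or below the
+# threshold), so positive pairs should score 0 and negative pairs 1.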
+
+def ModelMetrics(pos_list, neg_list):
+    """
+
+    :param pos_list: Takes a list containing the predictions of the model on anchor and positive samples
+    :param neg_list: Takes a list containing the predictions of the model on anchor and negative samples
+    :return: None; prints the accuracy and plots a confusion matrix.
+    """
+    true = np.array([0] * len(pos_list) + [1] * len(neg_list))
+    pred = np.append(pos_list, neg_list)
+
+    # Compute and print the accuracy
+    print(f"\nAccuracy of model: {accuracy_score(true, pred)}\n")
+
+    # Compute and plot the Confusion matrix
+
+    cf_matrix = confusion_matrix(true, pred)
+    #disp = ConfusionMatrixDisplay(confusion_matrix=cf_matrix, )
+    #disp.plot()
+    #plt.show()
+    categories = ['Similar', 'Different']
+    names = ['True Similar', 'False Similar', 'False Different', 'True Different']
+    percentages = ['{0:.2%}'.format(value) for value in cf_matrix.flatten() / np.sum(cf_matrix)]
+
+    labels = [f'{v1}\n{v2}' for v1, v2 in zip(names, percentages)]
+    labels = np.asarray(labels).reshape(2, 2)
+
+    sns.heatmap(cf_matrix, annot=labels, cmap='Blues', fmt='',
+                xticklabels=categories, yticklabels=categories)
+
+    plt.xlabel("Predicted", fontdict={'size': 14}, labelpad=10)
+    plt.ylabel("Actual", fontdict={'size': 14}, labelpad=10)
+    plt.title("Confusion Matrix", fontdict={'size': 18}, pad=20)
+    plt.show()
+
+pos_list = np.array([])
+neg_list = np.array([])
+
+for data in data_generator(test_triplet):
+
+    a, p, n = data
+    pos_list = np.append(pos_list, classify_images(a, p))
+
+    neg_list = np.append(neg_list, classify_images(a, n))
+    break
+
+ModelMetrics(pos_list, neg_list)
+
diff --git a/test_model/use_model.py b/test_model/use_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..1347f0732202e11f161e9ae6e4a4df6c7521c374
--- /dev/null
+++ b/test_model/use_model.py
@@ -0,0 +1,86 @@
+import os
+import argparse
+
+import numpy as np
+import cv2
+import tensorflow as tf
+
+from facial_recognition.Siamese_model import image_embedder
+
+
+
+
+directory_path = os.getcwd()
+database_path = os.path.join(directory_path, 'database')
+encoder_path = os.path.join(directory_path, 'encoder.keras')
+
+parser = argparse.ArgumentParser(description='gives filename')
+parser.add_argument('file_name', metavar='file_name', type=str, help='gives filename')
+args = parser.parse_args()
+
+
+def read_image(Path):
+    """Read an image, convert to RGB, resize to 224x224 and scale to [0, 1]."""
+
+    # cv2.imread's second argument is an imread flag, so convert explicitly
+    image = cv2.cvtColor(cv2.imread(Path), cv2.COLOR_BGR2RGB)
+    image = tf.image.resize(image, (224, 224))
+
+    return image
+
+
+
+encoder = image_embedder((224,224,3))
+
+encoder.load_weights(encoder_path)
+def classify_images(face_list1, face_list2, threshold=1.3):
+    """
+
+    :param face_list1: batch of query face images.
+    :param face_list2: batch of reference face images.
+    :param threshold: squared-distance cutoff for declaring a match.
+    :return: 1 if the faces are judged to be the same person, 0 otherwise.
+    """
+    # Getting the encodings for the passed faces
+    tensor1 = encoder.predict(face_list1)
+    tensor2 = encoder.predict(face_list2)
+
+    distance = np.sum(np.square(tensor1 - tensor2), axis=-1)
+    #prediction = np.where(distance <= threshold, 1, 0)
+    if distance <= threshold:
+        return 1
+    else:
+        return 0
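+
+# Compare the query image against every stored image of every person and keep
+# a per-person list of 0/1 match decisions.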
+file_name = args.file_name
+test_path = os.path.join(directory_path, file_name)
+img1 = np.array([read_image(Path=test_path)])
+result = {}
+for persons in os.listdir(database_path):
+    result[persons] = []
+    images_path = os.path.join(database_path, persons)
+    for images in os.listdir(images_path):
+        image_path = os.path.join(images_path, images)
+        img_person = np.array([read_image(Path=image_path)])
+        result1 = classify_images(img1, img_person)
+        result[persons].append(result1)
+print(result)
+identified_person = 0
+max_match = 0
+for persons in result.keys():
+    match = np.sum(np.array(result[persons]) == 1)
+    match_percentage = (match / len(result[persons]))*100
+    if match_percentage >= max_match and match_percentage > 70:
+        identified_person = persons
+        max_match = match_percentage
+    else:
+        continue
+if identified_person==0:
+    unlock=False
+    print("no match was found")
+else:
+    unlock=True
+    print(f"identified person is {identified_person} with accuracy score {max_match}")
+
diff --git a/test_model/use_tflite.py b/test_model/use_tflite.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec24d0bb2571ff23336bf71205cf236a2fb2b11e
--- /dev/null
+++ b/test_model/use_tflite.py
@@ -0,0 +1,153 @@
+import os
+
+import numpy as np
+import cv2
+import tensorflow as tf
+
+
+#from Siamese_model import image_embedder, get_siamese_network
+#from functions import split_dataset,get_image,Generate_dataset, create_triplets
+
+
+from keras import layers
+from keras.applications import Xception
+from keras.models import Model, Sequential
+
+
+
+
+
+directory_path = os.getcwd()
+database_path = os.path.join(directory_path, 'database')
+
+#parser = argparse.ArgumentParser(description='gives filename')
+#parser.add_argument('file_name', metavar='file_name', type=str, help='gives filename')
+#args = parser.parse_args()
+
+
+def read_image(Path):
+    """
+
+    :param Path: Path to the image that will be read.
+    :return: a 3D tensor containing the image data with shape [height, width, channels].
+    """
+
+
+    image = cv2.cvtColor(cv2.imread(Path), cv2.COLOR_BGR2RGB)
+    image = tf.image.resize(image, (224, 224))
+    image = image / 255.0
+
+    return image
+
+
+
+
+
+
+
+def image_embedder(input_shape):
+    """ Returns the image encoding model """
+
+    pretrained_model = Xception(
+        input_shape=input_shape,
+        weights='imagenet',
+        include_top=False,
+        pooling='avg',
+    )
+
+    for i in range(len(pretrained_model.layers)-27):
+        pretrained_model.layers[i].trainable = False
+
+    encode_model = Sequential([
+        pretrained_model,
+        layers.Flatten(),
+
+        layers.Dense(512, activation='relu'),
+        layers.BatchNormalization(),
+
+        layers.Dense(256, activation="relu"),
+        layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1))
+    ], name="Encode_Model")
+    return encode_model
+
+# NOTE: this Keras encoder is built here but not used below; inference goes
+# through the TFLite interpreter instead.
+encoder = image_embedder((224,224,3))
+
+#encoder.load_weights(r'C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\test_model\model.tflite')
+
+interpreter = tf.lite.Interpreter(model_path=r"C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\test_model\model\tf_lite_model.tflite")
+interpreter.allocate_tensors()
+
+
+
+input_index = interpreter.get_input_details()[0]["index"]
+output_index = interpreter.get_output_details()[0]["index"]
+input_details = interpreter.get_input_details()
+output_details = interpreter.get_output_details()
+print(input_details)
+print(output_details)
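+# TFLite inference pattern: set the input tensor, invoke the interpreter, then
+# read the output tensor back out.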
+img1 = read_image(r'C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\test_model\test_image_1.jpg')
+interpreter.set_tensor(input_details[0]['index'], [img1])
+interpreter.invoke()
+output_data = interpreter.get_tensor(output_details[0]['index'])
+img2 = read_image(r'C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\test_model\test_image_2.jpg')
+interpreter.set_tensor(input_details[0]['index'], [img2])
+interpreter.invoke()
+output_data2 = interpreter.get_tensor(output_details[0]['index'])
+print(output_data2)
+print(output_data)
+def classify_images(face_list1, face_list2, threshold=1.2):
+    """
+
+    :param face_list1: a query face image.
+    :param face_list2: a reference face image.
+    :param threshold: squared-distance cutoff for declaring a match.
+    :return: 1 if the faces are judged to be the same person, 0 otherwise.
+    """
+    # Getting the encodings for the passed faces
+
+    interpreter.set_tensor(input_details[0]['index'], [face_list1])
+    interpreter.invoke()
+    output_data = interpreter.get_tensor(output_details[0]['index'])
+
+    interpreter.set_tensor(input_details[0]['index'], [face_list2])
+    interpreter.invoke()
+    output_data2 = interpreter.get_tensor(output_details[0]['index'])
+    distance = np.sum(np.square(output_data - output_data2), axis=-1)
+    #prediction = np.where(distance <= threshold, 1, 0)
+    if distance <= threshold:
+        return 1
+    else:
+        return 0
+#file_name = args.file_name
+test_path = os.path.join(directory_path, 'test_image_2.jpg')
+img1 = read_image(Path=test_path)
+result = {}
+for persons in os.listdir(database_path):
+    result[persons] = []
+    images_path = os.path.join(database_path, persons)
+    for images in os.listdir(images_path):
+        image_path = os.path.join(images_path, images)
+        img_person = read_image(Path=image_path)
+
+        result1 = classify_images(img1, img_person)
+        result[persons].append(result1)
+
+identified_person = 0
+max_match = 0
+for persons in result.keys():
+    match = np.sum(np.array(result[persons]) == 1)
+    match_percentage = (match / len(result[persons]))*100
+    if match_percentage >= max_match and match_percentage > 70:
+        identified_person = persons
+        max_match = match_percentage
+    else:
+        continue
+if identified_person == 0:
+    print("no matches were found")
+else:
+    print(f"identified person is {identified_person} with accuracy score {max_match}")
+
diff --git a/train_model_tripletloss.py b/train_model_tripletloss.py
new file mode 100644
index 0000000000000000000000000000000000000000..091ef57804342aafdb138f078895e652840bf58b
--- /dev/null
+++ b/train_model_tripletloss.py
@@ -0,0 +1,58 @@
+import random
+import numpy as np
+import tensorflow as tf
+import pandas as pd
+import tensorflow_addons as tfa
+from Siamese_model import image_embedder, get_siamese_network
+from functions import split_dataset, Generate_dataset, create_triplets
+
+
+random.seed(5)
+np.random.seed(5)
+tf.random.set_seed(5)
+Path = "replace this with Path to dataset"
+
+physical_devices = tf.config.list_physical_devices('GPU')
+for gpu_instance in physical_devices:
+    tf.config.experimental.set_memory_growth(gpu_instance, True)
+
+train_list, test_list = split_dataset(directory=Path, split=0.9)
+train_triplet = create_triplets(Path, train_list, max_files=55)
+test_triplet = create_triplets(Path, test_list, max_files=5)
+train_dataset = Generate_dataset(Path=Path, triplet_list=train_triplet)
+train_dataset = train_dataset.shuffle(buffer_size=1024)
+train_dataset = train_dataset.batch(2048, drop_remainder=False)
+train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
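+# Each batch yields ((anchor, positive, negative) images, (anchor, positive,
+# negative) labels); TripletSemiHardLoss mines triplets within each batch from
+# the labels and the L2-normalized embeddings of each encoder branch.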
+print(type(train_dataset))
+
+
+
+
+
+tf.debugging.set_log_device_placement(True)
+gpus = tf.config.list_logical_devices('GPU')
+strategy = tf.distribute.MirroredStrategy(gpus)
+with strategy.scope():
+    emb_mod, model = get_siamese_network([224, 224, 3])
+
+    checkpoint_path = 'replace with path to checkpoint'
+    model.compile(optimizer=tf.keras.optimizers.Adam(0.0001),
+                  loss=tfa.losses.TripletSemiHardLoss(margin=0.3))
+    cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
+                                                     save_weights_only=True, verbose=1)
+    history = model.fit(train_dataset, epochs=15, callbacks=[cp_callback])
+    hist_df = pd.DataFrame(history.history)
+    hist_json_file = 'replace with path to history'
+    with open(hist_json_file, mode='w') as f:
+        hist_df.to_json(f)
+    hist_csv_file = 'replace with path to history'
+    with open(hist_csv_file, mode='w') as f:
+        hist_df.to_csv(f)
+
+
+
+model.save('replace with path to directory')
+