From be5ab673f1dd1e811490a517cbf5fdeb6b58db80 Mon Sep 17 00:00:00 2001
From: waelkh <waelkhlifi12@gmail.com>
Date: Tue, 19 Mar 2024 03:14:05 +0100
Subject: [PATCH] Minor changes, mainly added comments

---
 Dataset_tools/Rename_vgg_face_dataset.py |  2 +-
 Dataset_tools/Split_celeba_in_folders.py | 15 +++++---
 Dataset_tools/crop_celeba.py             | 20 +++++++----
 Dataset_tools/crop_vggface.py            | 33 +++++++++--------
 Siamese_model.py                         | 11 ++++++
 face_image_quality/face_image_quality.py | 31 +++++++++++++---
 functions.py                             | 30 +++++++++++++++-
 requirements.txt                         | Bin 0 -> 2852 bytes
 test_model/capture_image.py              | 14 ++++++--
 test_model/convert_tf_lite.py            |  5 +++
 test_model/test_model.py                 | 44 ++++++++++++++++++++---
 test_model/use_model.py                  | 20 +++++++----
 test_model/use_tflite.py                 | 12 +++++++
 train_model_tripletloss.py               | 11 +++---
 14 files changed, 198 insertions(+), 50 deletions(-)
 create mode 100644 requirements.txt

diff --git a/Dataset_tools/Rename_vgg_face_dataset.py b/Dataset_tools/Rename_vgg_face_dataset.py
index 1f7b1d3..0bad9d6 100644
--- a/Dataset_tools/Rename_vgg_face_dataset.py
+++ b/Dataset_tools/Rename_vgg_face_dataset.py
@@ -1,5 +1,5 @@
 import os
-import shutil
+
diff --git a/Dataset_tools/Split_celeba_in_folders.py b/Dataset_tools/Split_celeba_in_folders.py
index 1193780..275ea77 100644
--- a/Dataset_tools/Split_celeba_in_folders.py
+++ b/Dataset_tools/Split_celeba_in_folders.py
@@ -3,8 +3,15 @@
 import os
 import shutil

-def Read_Two_Column_File(file_name):
-    with open(file_name, 'r') as f_input:
+def Read_Two_Column_File(file_path):
+    """
+
+    :param file_path: Path to the identity file
+    :return: Two lists containing the labels and the filenames corresponding to each label.
+             y is the labels
+             x is the filenames
+    """
+    with open(file_path, 'r') as f_input:
         csv_input = csv.reader(f_input, delimiter=' ', skipinitialspace=True)
         x = []
         y = []
@@ -16,8 +23,8 @@ def Read_Two_Column_File(file_name):

 x, y = Read_Two_Column_File(r'/home/khlifi/Downloads/identity_CelebA.txt')

-path = r'/home/khlifi/Documents/newdat'
-src_folder = r"/home/khlifi/Documents/datasets"
+path = 'destination path'
+src_folder = 'source folder path'
 for i in range(len(x)):
     b=0
     for j in range(len(x)):
diff --git a/Dataset_tools/crop_celeba.py b/Dataset_tools/crop_celeba.py
index 7190e99..aaf9d0e 100644
--- a/Dataset_tools/crop_celeba.py
+++ b/Dataset_tools/crop_celeba.py
@@ -6,6 +6,12 @@ from matplotlib.pyplot import imshow
 import os
 import shutil
 def crop_image(image_path):
+    """
+
+    :param image_path: Path to the input image
+    :return: tuple containing a boolean indicating successful detection and the cropped
+             image if successful, otherwise (False, None)
+    """
     detector = MTCNN()
     img=cv2.imread(image_path)
     data=detector.detect_faces(img)
@@ -13,23 +19,25 @@ def crop_image(image_path):
     if data !=[]:
         for faces in data:
             box=faces['box']
-            # calculate the area in the image
+            # calculate the area of the bounding box in the image
             area = box[3] * box[2]
+            # check if the current detected face is the largest
             if area>biggest:
                 biggest=area
                 bounding_box=box
+        # ensure bounding box coordinates are non-negative
         bounding_box[0]= 0 if bounding_box[0]<0 else bounding_box[0]
         bounding_box[1]= 0 if bounding_box[1]<0 else bounding_box[1]
-        img =img[bounding_box[1]: bounding_box[1]+bounding_box[3],bounding_box[0]: bounding_box[0]+ bounding_box[2]]
-        cropped_face = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # convert from bgr to rgb
+        # crop the face region from the image
+        cropped_img = img[bounding_box[1]: bounding_box[1]+bounding_box[3], bounding_box[0]: bounding_box[0]+bounding_box[2]]
+        cropped_face = cv2.cvtColor(cropped_img, cv2.COLOR_BGR2RGB) # convert from bgr to rgb
         return (True, cropped_face)
     else:
         return (False, None)
 #path = r"G:\datasets\celeb-A\img_align_celeba\000060.jpg"
-Path2 = r'/home/khlifi/Downloads/img_align_celeba'
-dst_path= r"/home/khlifi/Documents/datasets"
+Path2 = 'replace with source path'
+dst_path= 'replace with destination path'
 all_files = os.listdir(Path2)
-print(len(all_files))
 for files in all_files:
     img_path = os.path.join(Path2, files)
     dst_path3 = os.path.join(dst_path, files)
diff --git a/Dataset_tools/crop_vggface.py b/Dataset_tools/crop_vggface.py
index 37386a7..6f22c40 100644
--- a/Dataset_tools/crop_vggface.py
+++ b/Dataset_tools/crop_vggface.py
@@ -1,10 +1,13 @@
 from mtcnn import MTCNN
 import cv2
-#import matplotlib.pyplot as plt
-#from matplotlib.pyplot import imshow
 import os
-import shutil
 def crop_image(image_path):
+    """
+
+    :param image_path: Path to the input image
+    :return: tuple containing a boolean indicating successful detection and the cropped
+             image if successful, otherwise (False, None)
+    """
     detector = MTCNN()
     img=cv2.imread(image_path)
     data=detector.detect_faces(img)
@@ -12,23 +15,25 @@ def crop_image(image_path):
     if data !=[]:
         for faces in data:
             box=faces['box']
-            # calculate the area in the image
+            # calculate the area of the bounding box in the image
             area = box[3] * box[2]
+            # check if the current detected face is the largest
             if area>biggest:
                 biggest=area
-                bbox=box
-        bbox[0]= 0 if bbox[0]<0 else bbox[0]
-        bbox[1]= 0 if bbox[1]<0 else bbox[1]
-        img =img[bbox[1]: bbox[1]+bbox[3],bbox[0]: bbox[0]+ bbox[2]]
-        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # convert from bgr to rgb
-        return (True, img)
+                bounding_box=box
+        # ensure bounding box coordinates are non-negative
+        bounding_box[0]= 0 if bounding_box[0]<0 else bounding_box[0]
+        bounding_box[1]= 0 if bounding_box[1]<0 else bounding_box[1]
+        # crop the face region from the image
+        cropped_img = img[bounding_box[1]: bounding_box[1]+bounding_box[3], bounding_box[0]: bounding_box[0]+bounding_box[2]]
+        cropped_face = cv2.cvtColor(cropped_img, cv2.COLOR_BGR2RGB) # convert from bgr to rgb
+        return (True, cropped_face)
     else:
         return (False, None)
-#path = r"G:\datasets\celeb-A\img_align_celeba\000060.jpg"
-Path2 = r"/home/khlifi/Documents/vgg_face/train"
-dst_path = r"/home/khlifi/Documents/vggface_cropped"
+
+Path2 = 'replace with source directory'
+dst_path = 'replace with destination directory'
 all_folders = os.listdir(Path2)
-print(len(all_folders))
 result = False
 for folders in all_folders:
diff --git a/Siamese_model.py b/Siamese_model.py
index fa47c77..75370a2 100644
--- a/Siamese_model.py
+++ b/Siamese_model.py
@@ -11,9 +11,15 @@ from keras.models import Model, Sequential

 def image_embedder(input_shape):
+    """
+
+    :param input_shape: Shape of the expected input
+    :return: the encoder
+    """
     "this function creates a CNN that will be used to generate embeddings of the images"
     "the layers until the 27th layer will be frozen"
+
     pretrained_model = Xception(
         input_shape=input_shape,
         weights='imagenet',
@@ -36,6 +42,11 @@ def image_embedder(input_shape):

 def get_siamese_network(input_shape):
+    """
+
+    :param input_shape: shape of the input expected by the network
+    :return: the encoder and the siamese network.
+    """
     encoder = image_embedder(input_shape)

     # Define the input layers of the model for the inputs
diff --git a/face_image_quality/face_image_quality.py b/face_image_quality/face_image_quality.py
index a61c566..2046548 100644
--- a/face_image_quality/face_image_quality.py
+++ b/face_image_quality/face_image_quality.py
@@ -6,28 +6,43 @@ from numpy.linalg import norm

+image_path = r'C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\test_model\database\1\0.jpg'
-
-image_path = cv2.imread(r'G:\datasets\new_dat\1\006439.jpg')
+image = cv2.imread(image_path)
 #gray = cv2.imread(r'/home/khlifi/Documents/test_bgg/n000017/53.jpg', 0)
 def brightness(img):
+    """
+
+    :param img: Image as a NumPy array
+    :return: the average brightness of the image
+    """
     # A good value for brightness would be 80
     if len(img.shape) == 3:
         # Colored RGB or BGR (*Do Not* use HSV images with this function)
-        # create brightness with euclidean norm
+        # create brightness with Euclidean norm
         return np.average(norm(img, axis=2)) / np.sqrt(3)
     else:
         # Grayscale
         return np.average(img)
 #print(f"brightness is: {brightness(img)}")
 def blurr_detection(img):
+    """
+
+    :param img: the image as a NumPy array
+    :return: the variance of the Laplacian of the image
+    """
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
     variance_of_laplacian = cv2.Laplacian(gray, cv2.CV_64F).var()
     return variance_of_laplacian

 def resolution_measurement(img):
+    """
+
+    :param img: takes the image as input
+    :return: returns the width and height
+    """
     wid = img.shape[1]
     hgt = img.shape[0]
     print(str(wid) + "x" + str(hgt))
@@ -35,12 +50,18 @@
 #resolution_measurement(img)

 def check_image_quality(image_path):
+    """
+
+    :param image_path: takes the image path as input, to check the quality of the image
+    :return: 1 if the image passed the quality check, 0 if not
+    """
     img = cv2.imread(image_path)
-    if resolution_measurement(img) == (383,524) and blurr_detection(img)>100 and brightness(img)>90:
+    #if resolution_measurement(img) == (383,524) and blurr_detection(img)>100 and brightness(img)>90:
+    if blurr_detection(img) > 100 and brightness(img) > 90:
         print("image quality is acceptable")
         return 1
     else:
         print('image was not accepted')
         return 0

-check_image_quality()
+check_image_quality(image_path)
diff --git a/functions.py b/functions.py
index db5019e..4436061 100644
--- a/functions.py
+++ b/functions.py
@@ -5,6 +5,14 @@ from keras.applications.inception_v3 import preprocess_input
 import random

 def get_image(image_path, preprocessing=False, resize=False, shape= (224,224,3)):
+    """
+
+    :param image_path: Path to the image file
+    :param preprocessing: if set to True, preprocessing will be performed; False by default
+    :param resize: set to True if resizing is needed
+    :param shape: image size for resizing
+    :return: the 3D tensor containing the image information
+    """
     image_string = tf.io.read_file(image_path)
     image = tf.image.decode_jpeg(image_string, channels=3)
@@ -20,6 +28,12 @@ def preprocess_images(anchor_img, positive_img, negative_img):

 def split_dataset(directory, split=0.9):
+    """
+
+    :param directory: Path to the directory containing the dataset
+    :param split: The fraction of the data that will be used for training.
+    :return: two dictionaries of the form {label: number of images corresponding to that label}
+    """
     all_files = os.listdir(directory)
     len_train = int(len(all_files) * split)
     random.shuffle(all_files)
@@ -47,7 +61,15 @@ def split_dataset(directory, split=0.9):
     return train_list, test_list

 def create_triplets(directory, folder_list, max_files=20):
-    "Create triplets from the generated dataset lists."
+    """
+
+    :param directory: Path to the directory of the dataset
+    :param folder_list: the dictionary returned by the 'split_dataset' function
+    :param max_files: maximum number of files from each label that can be used for triplet creation
+    :return: list of triplets, each containing 3 tuples (anchor, positive, negative).
+             Tuples are of the form ('label', 'filename')
+    """
+    "Creates triplets from the generated dataset lists."
     triplets = []
     list_folders = list(folder_list.keys())
@@ -73,6 +95,12 @@ def create_triplets(directory, folder_list, max_files=20):
     return triplets

 def Generate_dataset(list, Path):
+    """
+
+    :param list: This is the triplet list
+    :param Path: Path to the dataset
+    :return: returns a TensorFlow dataset of images.
+    """
     "this function will create a tf dataset "
     anchor_label = []
     positive_label = []
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e99009c71a7fee96cc3da6a745e83213c03bec67
GIT binary patch
literal 2852
zcmezWFOeaMp_n0uL6@O`p_0Lt!Ir^@L65<ZL65<JftP`cA(5e&p@gB7A&;SeA(5eo
zp_m~RtO}&Uj6sjV7_2IZA(bJKp_HM7A(J7Ep_Czqp_n0`p_HM3!33<@1Z*bAWQh7?
zhD3&Bh75*Oh7yK+hJ3Jkklm(W6ZII3z@{cMq%ssi&CCRwW5i&<V8mbyHVxzgLk4rG
z$_%*cN*Ht*@)+_NiWqVk5*czBG8wAC=77ux=>%bz`S}caU>B4!6v6!uG8g1hi2sur
zDjAX)a^NNyfb9m!8-i_4W5{7hWGG=sVkl)uV@Lz5D~7riWE03oMhs?9wGjJI{R|2l
z69#hzn6J~JVGeRH$TpA(CSd*P;IK*uyG9op-WgE4K)OL@KwOPor7lB0To))@lE8XV
z{fnv-6z8D$PXwz3xd!BOm>r-nOJ>Mq$Op@U%rRpCse#C6Fqp!_6ciGMNOGABDGYfG
z5K}>NknqW5$Ym%1$6N_G-jWz}8FImCB#|Kn9Lf+K5PyPFDJX?v(^bTf3Qd7U44_m5
zi4jnG1-TLub0Bv^Qfv;~{UFyvQfL-KK0^}J6`+^^=>xeGl5(=a@eIlvAURN&7&3t3
z3F7l?hD?TXhD@*vau~|MYC-BiW`JVb6daq77yzZMM20+ubf_CZZUngk5+XSa`3#`E
zg6cwuEjbJo47m&t8Iau|(;+gx;JlE{kiwA9P|lDCHW}o0kSd7#VJbj5G?*a~oNhrb
zg!n{{0g~Q8`KN@TfC1zRSd4&Ffy}air&o|0bQw|@N*F2`3c%qGQUme>$TWzHav4e(
zk{R-#ZUM=GTnmYZJcd$myhBPKkT@v6z)BWuDHdcu$ZUvT@)-&kk{L>&sR?AZB?HKH
zhS0JJB%jKV$B@iW1})`E7%~_@IT7SeP<&W|OFKOV3uu`K(g7+(QW-KC@)(N2?gOa>
z*$cuDKY{W_GD9{)B11Y@9V7-Jr4z(uAQgGwlnY6J5I4h0R*)>nC6IIgG9To3kozIw
zQoxYOkOL0;a&Qbn@(;)inEgcz`QXwEQucsM0Qm;u0!R#k%5+ev4N?g+1(XUvw!loO
z1eZ5?42ED^Kr$c|FgGA%L3uHsAq8BzgK`(hK9FkU5CNqCkbT8SAp(*I`3zwqD3yTn
zCCKLmU|T`*pg4ntTO|W1WI%BQN?D-tH5FRBK+-!X6wuXIflWfx5FmR&r3t9~g4hiz
z6$%+j!D+OF0TCz0NcA1M8r)?j$b6W&#n99T3V%y*?k;8km5iX00F;tI^#>?iAbAnw
zTaZc+jp{;(E>Nt2LKh@&2(FJIaRsV-LG?ALZiAQv3V%>K2bKPyvJe#R72t4y=!KLf
z5E)2)3{sH_E|WkufWi?{8Wn@{ZYnq*gJKutYEan(t9L=AEi6wKGZdl32FNauOCTj@
z2?NNDpf&&`-az(%WFff><dzZ!P<{lp5Q-R*z;#j)Lkif{AooB@Qb@>RR|!iKi43~n
z_DB)fKSf{@KsJHu2S`dn)(@(wLFFOHJqUXsA&X6&E;OBh>KBktK%q{&UQo#aG8K_t
z4UuXHkPN6~0htMNmoB&_)n!O$NM<Nz&}B#i=YLRKRD#=JsSLSrA0nq%kXfL%PBOS1
z1u_xlN>Cbu`7eV3l(IqfC@95&T3pEtpfVk#7L<}fAp%pI53g51p$RK@ATbBZ;UK@I
zGsH8bGE^{tYzO%SRGWfmka-|`kz>0GTn2*D1xOZ=Lm_1psO<$R=Q0_R7>vO_1i1y0
zra*RvgX_<1a4W79T2F#p3~?Js1*onC<up*alERP)&5IxtKrVs!s+^&SA(5egp#*FL
cNY0P}BnwGrRScO71q_g~2xK;>T?s3v0Z7K8&Hw-a

literal 0
HcmV?d00001

diff --git a/test_model/capture_image.py b/test_model/capture_image.py
index a5131aa..c432df1 100644
--- a/test_model/capture_image.py
+++ b/test_model/capture_image.py
@@ -25,14 +25,21 @@ while True:
     #if you press space
         img_name= f"test_image_new{img_counter}.jpg"
         img = cv2.imwrite(img_name, frame)
-        image_path = directory_path + f"test_image_new{img_counter}.jpg"
-        print(image_path)
+        print(f"image {img_name} was saved")
         img_counter+= 1
 cam.release()
 cv2.destroyAllWindows()
-print(img)
+image_path = os.path.join(directory_path, img_name)
+print(image_path)
+
 def crop_image(image_path):
+    """
+
+    :param image_path: Path to the image
+    :return: tuple containing a boolean indicating successful detection and the cropped
+             image if successful, otherwise (False, None)
+    """
     detector = MTCNN()
     img = cv2.imread(image_path)
     data = detector.detect_faces(img)
@@ -59,4 +66,5 @@
 if x==1 :
     print("image was accepted")
     cv2.imwrite("test_image_new.jpg", img)
 else:
+    print('image was not accepted')
\ No newline at end of file
diff --git a/test_model/convert_tf_lite.py b/test_model/convert_tf_lite.py
index b4d145a..3f43b04 100644
--- a/test_model/convert_tf_lite.py
+++ b/test_model/convert_tf_lite.py
@@ -8,6 +8,11 @@ from keras.applications import Xception
 from keras.models import Model, Sequential
 #os.chdir(r"C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\test_model\model\encoder.keras")
 def image_embedder(input_shape):
+    """
+
+    :param input_shape: the input shape which the CNN will expect
+    :return: the convolutional neural network that will be used for embedding.
+    """
     """ Returns the convolutional neural network that will generate the encodings of each picture """
     pretrained_model = Xception(
diff --git a/test_model/test_model.py b/test_model/test_model.py
index 81d432a..f2581a4 100644
--- a/test_model/test_model.py
+++ b/test_model/test_model.py
@@ -13,26 +13,34 @@ from pathlib import Path
 from sklearn.metrics import accuracy_score, confusion_matrix,ConfusionMatrixDisplay
-#from sklearn.metrics.ConfusionMatrixDisplay import plot_confusion_matrix
+

 directory_path = os.getcwd()
 Path = os.path.join(directory_path, 'Extracted Faces')
 print(Path)
-#Path = r'C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\Dataset_tools\Extracted Faces'
-#Path =r'G:\datasets\new_dat'
 random.seed(5)
 np.random.seed(5)
 tf.random.set_seed(5)
 folders = os.listdir(Path)

 def read_imagee(index):
+    """
+
+    :param index: takes the index of an image, containing the label and the filename.
+    :return: a 3D tensor of shape [height, width, channels]
+    """
     image_path = os.path.join(Path, index[0], index[1])
     image_string = tf.io.read_file(image_path)
     image = tf.image.decode_jpeg(image_string, channels=3)
     image = tf.image.convert_image_dtype(image, tf.float32)
-    #image = tf.image.resize(image, (224,224))
+    return tf.image.resize(image, (224,224))

 def read_image(index):
+    """
+
+    :param index: takes the index of an image, containing the label and the filename.
+    :return: a 3D tensor of shape [height, width, channels]
+    """
     path = os.path.join(Path, index[0], index[1])
     image = cv2.imread(path, cv2.COLOR_BGR2RGB)
@@ -52,6 +60,13 @@

 test_triplet = create_triplets(Path, test_list, max_files=2)
 print(f'this is the Triplet list {test_triplet}')

 def batch_generator(triplet_list, batch_size=256, preprocessing = False):
+    """
+
+    :param triplet_list: the list containing the triplets
+    :param batch_size: size of the image batch
+    :param preprocessing: specifies if any preprocessing is applied
+    :return: yields a batch of images (images are in numpy array form).
+    """
     batch_step = len(triplet_list)//batch_size

     for i in range(batch_step+1):
@@ -76,6 +91,11 @@ def batch_generator(triplet_list, batch_size=256, preprocessing = False):
         yield ([anchor, positive, negative])

 def data_generator(triplet_list):
+    """
+
+    :param triplet_list: list of triplets of the form (anchor, positive, negative)
+    :return: returns a tuple containing the images in numpy array form.
+    """
     batch_step = len(triplet_list)
     anchor = []
     positive = []
@@ -97,12 +117,20 @@

 encoder = image_embedder((224,224,3))

-encoder.load_weights(r'C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\test_model\model\encoder.keras')
+encoder.load_weights(os.path.join('model', 'encoder.keras'))

 def classify_images(face_list1, face_list2, threshold=1.2):
+    """
+
+    :param face_list1: a list of images; can be anchor, positive or negative
+    :param face_list2: a list of images; can be anchor, positive or negative
+    :param threshold: a value for the Euclidean distance that determines if two images
+                      belong to the same class.
+    :return: binary predictions based on the distance threshold
+    """
     # Getting the encodings for the passed faces
     embeddings1 = encoder.predict(face_list1)
     embeddings2 = encoder.predict(face_list2)
@@ -113,6 +141,12 @@

 def ModelMetrics(pos_list, neg_list):
+    """
+
+    :param pos_list: a list containing the predictions of the model on anchor and positive samples
+    :param neg_list: a list containing the predictions of the model on anchor and negative samples
+    :return: a confusion matrix
+    """
     true = np.array([0] * len(pos_list) + [1] * len(neg_list))
     pred = np.append(pos_list, neg_list)
diff --git a/test_model/use_model.py b/test_model/use_model.py
index 5151b0e..1347f07 100644
--- a/test_model/use_model.py
+++ b/test_model/use_model.py
@@ -14,10 +14,11 @@ from facial_recognition.Siamese_model import image_embedder, get_siamese_network

 directory_path = os.getcwd()
 database_path = os.path.join(directory_path, 'database')
+encoder_path = os.path.join(directory_path, 'encoder.keras')

-#parser = argparse.ArgumentParser(description='gives filename')
-#parser.add_argument('file_name', metavar='file_name', type=str, help='gives filename')
-#args = parser.parse_args()
+parser = argparse.ArgumentParser(description='gives filename')
+parser.add_argument('file_name', metavar='file_name', type=str, help='gives filename')
+args = parser.parse_args()

 def read_image(Path):
@@ -33,8 +34,15 @@

 encoder = image_embedder((224,224,3))

-encoder.load_weights(r'C:\Users\waelk\PycharmProjects\facial_recognition\facial_recognition\test_model\model\encoder.keras')
+encoder.load_weights(encoder_path)
 def classify_images(face_list1, face_list2, threshold=1.3):
+    """
+
+    :param face_list1: a list of face images
+    :param face_list2: a list of face images
+    :param threshold: the Euclidean distance below which two faces are considered a match
+    :return: 1 if the distance is below the threshold, 0 otherwise
+    """
     # Getting the encodings for the passed faces
     tensor1 = encoder.predict(face_list1)
     tensor2 = encoder.predict(face_list2)
@@ -45,8 +53,8 @@
         return 1
     else:
         return 0
-#file_name = args.file_name
-test_path = os.path.join(directory_path, 'test_image_2.jpg')
+file_name = args.file_name
+test_path = os.path.join(directory_path, file_name)
 img1 = np.array([read_image(Path=test_path)])
 result = {}
 for persons in os.listdir(database_path):
diff --git a/test_model/use_tflite.py b/test_model/use_tflite.py
index 4c903d3..ec24d0b 100644
--- a/test_model/use_tflite.py
+++ b/test_model/use_tflite.py
@@ -28,6 +28,11 @@ database_path = os.path.join(directory_path, 'database')
 def read_image(Path):
+    """
+
+    :param Path: Path to the image that will be read.
+    :return: Returns a 3D tensor containing the image data in the shape [height, width, channels].
+    """
     image = cv2.imread(Path, cv2.COLOR_BGR2RGB)
@@ -93,6 +98,13 @@ output_data2 = interpreter.get_tensor(output_details[0]['index'])
 print(output_data2)
 print(output_data)
 def classify_images(face_list1, face_list2, threshold=1.2):
+    """
+
+    :param face_list1: a list of labeled images
+    :param face_list2: a list of labeled images
+    :param threshold: the Euclidean distance that determines if two images belong to the same class
+    :return:
+    """
     # Getting the encodings for the passed faces
     tensor1 = interpreter.set_tensor(input_details[0]['index'], [face_list1])
diff --git a/train_model_tripletloss.py b/train_model_tripletloss.py
index 468851b..091ef57 100644
--- a/train_model_tripletloss.py
+++ b/train_model_tripletloss.py
@@ -10,7 +10,7 @@ from functions import split_dataset, Generate_dataset, create_triplets
 random.seed(5)
 np.random.seed(5)
 tf.random.set_seed(5)
-Path = "replace this with link to dataset"
+Path = "replace this with path to dataset"

 physical_devices = tf.config.list_physical_devices('GPU')
 for gpu_instance in physical_devices:
@@ -23,6 +23,7 @@ train_dataset = Generate_dataset(Path=Path,list=train_triplet)
 train_dataset = train_dataset.shuffle(buffer_size=1024)
 train_dataset = train_dataset.batch(2048, drop_remainder=False)
 train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
+print(type(train_dataset))

@@ -34,23 +35,23 @@ strategy = tf.distribute.MirroredStrategy(gpus)

 with strategy.scope():
     emb_mod, model= get_siamese_network([224 ,224, 3])
-    checkpoint_path = r"/home/khlifi/Documents/model_semihard_triplet_loss/all/allweights_1024b_preprocessing/max_55.keras"
+    checkpoint_path = 'replace with path to checkpoint'
     model.compile(optimizer=tf.keras.optimizers.Adam(0.0001),loss=tfa.losses.TripletSemiHardLoss(margin=0.3))
     cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True, verbose=1)
     history = model.fit(train_dataset, epochs=15, callbacks=[cp_callback])
     hist_df = pd.DataFrame(history.history)
-    hist_json_file = '/home/khlifi/Documents/more_data_preprocessing_on/all/history_2048_50.json'
+    hist_json_file = 'replace with path to history'
     with open(hist_json_file, mode='w') as f:
         hist_df.to_json(f)
-    hist_csv_file = '/home/khlifi/Documents/more_data_preprocessing_on/all/history_2048_50.csv'
+    hist_csv_file = 'replace with path to history'
     with open(hist_csv_file, mode='w') as f:
         hist_df.to_csv(f)

-model.save('/home/khlifi/Documents/model_semihard_triplet_loss/final/2048_batch_pre_on_max_50.keras')
+model.save('replace with path to directory')
-- 
GitLab