"peekOfCode":"def extract_zip(zip_file_name: str, save_dir: str):\n\"\"\"\n Extract .zip folder, if not already done\n\"\"\"\n parts = zip_file_name.split(\".\")\n file_name = parts[0]\n if not os.path.isdir(file_name):\n with zipfile.ZipFile(zip_file_name, \"r\") as zip_ref:\n zip_ref.extractall(save_dir)\n print(f\"Folder '{zip_file_name}' was successfully extracted\")",
"peekOfCode":"def create_folder(dir: str):\n\"\"\"\n Check if folder already exists\n If not create new folder\n\"\"\"\n if not os.path.exists(dir):\n os.makedirs(dir)\n# ------------------------------\n# Save model as .pt\n# ------------------------------",
"detail":"main",
"documentation":{}
},
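Note on extract_zip: the peek derives file_name from the archive name (zip_file_name.split(".")) and checks os.path.isdir(file_name), yet extracts into save_dir, so the guard and the extraction target can disagree, and the split breaks on paths containing extra dots. A minimal sketch that keys the check on the actual extraction target (a standalone illustration, not the repository's code):

```python
import os
import zipfile


def extract_zip(zip_file_name: str, save_dir: str) -> None:
    """Extract the .zip archive into save_dir, if not already done."""
    # Key the "already extracted" check on the real extraction target,
    # and use splitext so dots elsewhere in the path are handled.
    stem = os.path.splitext(os.path.basename(zip_file_name))[0]
    target = os.path.join(save_dir, stem)
    if not os.path.isdir(target):
        with zipfile.ZipFile(zip_file_name, "r") as zip_ref:
            zip_ref.extractall(save_dir)
        print(f"Archive '{zip_file_name}' was successfully extracted")
```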
{
"label":"save_model",
"kind":2,
"importPath":"main",
"description":"main",
"peekOfCode":"def save_model(model, save_dir):\n\"\"\"\n Export model to TorchScript and save it\n\"\"\"\n if not os.path.exists(save_dir):\n model_scripted = torch.jit.script(model)\n model_scripted.save(save_dir)\n# ------------------------------\n# Get parameters\n# ------------------------------",
"detail":"main",
"documentation":{}
},
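save_model scripts the model with TorchScript and writes it only when save_dir does not already exist, so an existing checkpoint is never overwritten. A minimal round-trip sketch of that pattern, assuming any scriptable nn.Module:

```python
import os

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 2))     # stand-in for any scriptable module

save_path = "model_scripted.pt"
if not os.path.exists(save_path):          # mirrors save_model's overwrite guard
    torch.jit.script(model).save(save_path)

# TorchScript files reload without the original class definition
restored = torch.jit.load(save_path)
print(restored(torch.randn(1, 4)).shape)   # torch.Size([1, 2])
```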
{
"label":"get_parameters",
"kind":2,
"importPath":"main",
"description":"main",
"peekOfCode":"def get_parameters(model) -> NDArrays:\n\"\"\"\n Return model parameters as a list of NumPy ndarrays\n\"\"\"\n return [val.cpu().numpy() for _, val in model.state_dict().items()]\n# ------------------------------\n# Set parameters\n# ------------------------------\ndef set_parameters(model, parameters: NDArrays) -> None:\n\"\"\"",
"detail":"main",
"documentation":{}
},
{
"label":"set_parameters",
"kind":2,
"importPath":"main",
"description":"main",
"peekOfCode":"def set_parameters(model, parameters: NDArrays) -> None:\n\"\"\"\n Set model parameters from a list of NumPy ndarrays\n\"\"\"\n params_dict = zip(model.state_dict().keys(), parameters)\n state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})\n model.load_state_dict(state_dict, strict=True)\n return\n# ------------------------------\n# Implement Flower client",
"detail":"main",
"documentation":{}
},
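get_parameters and set_parameters rely on state_dict() preserving key insertion order, which is what lets the flat NumPy list zip back onto the right layers. A self-contained round-trip sketch:

```python
from collections import OrderedDict
from typing import List

import numpy as np
import torch
import torch.nn as nn


def get_parameters(model) -> List[np.ndarray]:
    # state_dict preserves insertion order, so the list order is stable
    return [val.cpu().numpy() for _, val in model.state_dict().items()]


def set_parameters(model, parameters: List[np.ndarray]) -> None:
    params_dict = zip(model.state_dict().keys(), parameters)
    state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
    model.load_state_dict(state_dict, strict=True)


src, dst = nn.Linear(3, 2), nn.Linear(3, 2)
set_parameters(dst, get_parameters(src))
assert torch.equal(src.weight, dst.weight)  # weights copied exactly
```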
{
"label":"evaluate",
"kind":2,
"importPath":"main",
"description":"main",
"peekOfCode":"def evaluate(server_round: int,\n parameters: NDArrays,\n config: Dict[str, Scalar],\n ) -> Optional[Tuple[float, Dict[str, Scalar]]]:\n\"\"\"\n The evaluate function will be called by Flower after each server round\n\"\"\"\n # create folder\n save_dir = os.path.join(\"runs\", CONFIG[\"output_folder\"], \"global_model\")\n create_folder(save_dir)",
"detail":"main",
"documentation":{}
},
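A function with this (server_round, parameters, config) signature is what Flower strategies accept as evaluate_fn, which is presumably how this evaluate is wired in: the server calls it on the aggregated weights after each round, and here it also saves the global model. A hedged sketch of the wiring; the stand-in model and the placeholder test pass are assumptions:

```python
from typing import Dict, Optional, Tuple

import flwr as fl
import torch
import torch.nn as nn
from flwr.common import NDArrays, Scalar

model = nn.Linear(4, 2)  # stand-in for the project's global model


def evaluate(server_round: int,
             parameters: NDArrays,
             config: Dict[str, Scalar],
             ) -> Optional[Tuple[float, Dict[str, Scalar]]]:
    # load the aggregated weights into the server's model copy
    keys = model.state_dict().keys()
    model.load_state_dict({k: torch.tensor(v) for k, v in zip(keys, parameters)},
                          strict=True)
    loss, accuracy = 0.0, 1.0  # placeholder for a real test pass
    return loss, {"accuracy": accuracy}


strategy = fl.server.strategy.FedAvg(evaluate_fn=evaluate)
```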
{
"label":"client_fn",
"kind":2,
"importPath":"main",
"description":"main",
"peekOfCode":"def client_fn(cid: str) -> FlowerClient:\n\"\"\"\n Create a Flower client instance. Each client will\n be trained and validated on his own unique data\n\"\"\"\n # load pretrained model\n model = get_model(CONFIG[\"model\"], CONFIG[\"num_classes\"])\n # specify output folder\n root_dir = os.path.join(\"runs\", CONFIG[\"output_folder\"])\n save_dir = os.path.join(root_dir, f\"client_{int(cid)+1}\")",
"detail":"main",
"documentation":{}
},
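client_fn is the client factory Flower calls once per simulated client id; each call builds a fresh model and that client's own data pipeline. A sketch of the pattern, where FlowerClient (a fl.client.NumPyClient subclass), get_model, and load_client_data stand in for definitions elsewhere in main, and the client/round counts are illustrative:

```python
import flwr as fl


def client_fn(cid: str) -> fl.client.NumPyClient:
    model = get_model(CONFIG["model"], CONFIG["num_classes"])  # fresh model per client
    train_loader, val_loader = load_client_data(cid)           # that client's own split
    return FlowerClient(model, train_loader, val_loader)


fl.simulation.start_simulation(
    client_fn=client_fn,
    num_clients=4,
    config=fl.server.ServerConfig(num_rounds=3),
    strategy=fl.server.strategy.FedAvg(),
)
```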
{
"label":"fit_config",
"kind":2,
"importPath":"main",
"description":"main",
"peekOfCode":"def fit_config(server_round: int) -> Dict[str, Scalar]:\n\"\"\"\n Return training configuration dict for each round\n\"\"\"\n config = {\n\"server_round\": server_round,\n\"epochs\": CONFIG[\"epochs\"],\n\"optimizer\": CONFIG[\"optimizer\"],\n\"momentum\": CONFIG[\"momentum\"],\n\"lr\": CONFIG[\"lr\"],",
"peekOfCode":"def extract_zip(zip_file_name: str, save_dir: str):\n\"\"\"\n Extract .zip folder, if not already done\n\"\"\"\n parts = zip_file_name.split(\".\")\n file_name = parts[0]\n if not os.path.isdir(file_name):\n with zipfile.ZipFile(zip_file_name, \"r\") as zip_ref:\n zip_ref.extractall(save_dir)\n print(f\"Folder '{zip_file_name}' was successfully extracted\")",
"peekOfCode":"def create_folder(dir: str):\n\"\"\"\n Check if folder already exists\n If not create new folder\n\"\"\"\n if not os.path.exists(dir):\n os.makedirs(dir)\n# ------------------------------\n# Save model as .pt\n# ------------------------------",
"detail":"main_esaim",
"documentation":{}
},
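fit_config (defined identically in all three main scripts above) produces the per-round training configuration; in Flower it is typically registered through the strategy's on_fit_config_fn hook, and each client then reads the values inside fit(). A self-contained sketch of that hookup using the CONFIG keys visible in the peek (the concrete values are illustrative):

```python
from typing import Dict

import flwr as fl
from flwr.common import Scalar

CONFIG = {"epochs": 5, "optimizer": "SGD", "momentum": 0.9, "lr": 0.001}


def fit_config(server_round: int) -> Dict[str, Scalar]:
    return {
        "server_round": server_round,
        "epochs": CONFIG["epochs"],
        "optimizer": CONFIG["optimizer"],
        "momentum": CONFIG["momentum"],
        "lr": CONFIG["lr"],
    }


strategy = fl.server.strategy.FedAvg(on_fit_config_fn=fit_config)
# Each client then reads config["epochs"], config["lr"], ... inside fit().
```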
{
"label":"save_model",
"kind":2,
"importPath":"main_esaim",
"description":"main_esaim",
"peekOfCode":"def save_model(model, save_dir):\n\"\"\"\n Export model to TorchScript and save it\n\"\"\"\n if not os.path.exists(save_dir):\n model_scripted = torch.jit.script(model)\n model_scripted.save(save_dir)\n# ------------------------------\n# Get parameters\n# ------------------------------",
"detail":"main_esaim",
"documentation":{}
},
{
"label":"get_parameters",
"kind":2,
"importPath":"main_esaim",
"description":"main_esaim",
"peekOfCode":"def get_parameters(model) -> NDArrays:\n\"\"\"\n Return model parameters as a list of NumPy ndarrays\n\"\"\"\n return [val.cpu().numpy() for _, val in model.state_dict().items()]\n# ------------------------------\n# Set parameters\n# ------------------------------\ndef set_parameters(model, parameters: NDArrays) -> None:\n\"\"\"",
"detail":"main_esaim",
"documentation":{}
},
{
"label":"set_parameters",
"kind":2,
"importPath":"main_esaim",
"description":"main_esaim",
"peekOfCode":"def set_parameters(model, parameters: NDArrays) -> None:\n\"\"\"\n Set model parameters from a list of NumPy ndarrays\n\"\"\"\n params_dict = zip(model.state_dict().keys(), parameters)\n state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})\n model.load_state_dict(state_dict, strict=True)\n return\n# ------------------------------\n# Implement Flower client",
"detail":"main_esaim",
"documentation":{}
},
{
"label":"evaluate",
"kind":2,
"importPath":"main_esaim",
"description":"main_esaim",
"peekOfCode":"def evaluate(server_round: int,\n parameters: NDArrays,\n config: Dict[str, Scalar],\n ) -> Optional[Tuple[float, Dict[str, Scalar]]]:\n\"\"\"\n The evaluate function will be called by Flower after each server round\n\"\"\"\n # create folder\n save_dir = os.path.join(\"runs\", CONFIG[\"output_folder\"], \"global_model\")\n create_folder(save_dir)",
"detail":"main_esaim",
"documentation":{}
},
{
"label":"client_fn",
"kind":2,
"importPath":"main_esaim",
"description":"main_esaim",
"peekOfCode":"def client_fn(cid: str) -> FlowerClient:\n\"\"\"\n Create a Flower client instance. Each client will\n be trained and validated on his own unique data\n\"\"\"\n # load pretrained model\n model = get_model(CONFIG[\"model\"], CONFIG[\"num_classes\"])\n # specify output folder\n root_dir = os.path.join(\"runs\", CONFIG[\"output_folder\"])\n save_dir = os.path.join(root_dir, f\"client_{int(cid)+1}\")",
"detail":"main_esaim",
"documentation":{}
},
{
"label":"fit_config",
"kind":2,
"importPath":"main_esaim",
"description":"main_esaim",
"peekOfCode":"def fit_config(server_round: int) -> Dict[str, Scalar]:\n\"\"\"\n Return training configuration dict for each round\n\"\"\"\n config = {\n\"server_round\": server_round,\n\"epochs\": CONFIG[\"epochs\"],\n\"optimizer\": CONFIG[\"optimizer\"],\n\"momentum\": CONFIG[\"momentum\"],\n\"lr\": CONFIG[\"lr\"],",
"peekOfCode":"def extract_zip(zip_file_name: str, save_dir: str):\n\"\"\"\n Extract .zip folder, if not already done\n\"\"\"\n parts = zip_file_name.split(\".\")\n file_name = parts[0]\n if not os.path.isdir(file_name):\n with zipfile.ZipFile(zip_file_name, \"r\") as zip_ref:\n zip_ref.extractall(save_dir)\n print(f\"Folder '{zip_file_name}' was successfully extracted\")",
"peekOfCode":"def create_folder(dir: str):\n\"\"\"\n Check if folder already exists\n If not create new folder\n\"\"\"\n if not os.path.exists(dir):\n os.makedirs(dir)\n# ------------------------------\n# Save model as .pt\n# ------------------------------",
"detail":"main_flta",
"documentation":{}
},
{
"label":"save_model",
"kind":2,
"importPath":"main_flta",
"description":"main_flta",
"peekOfCode":"def save_model(model, save_dir):\n\"\"\"\n Export model to TorchScript and save it\n\"\"\"\n if not os.path.exists(save_dir):\n model_scripted = torch.jit.script(model)\n model_scripted.save(save_dir)\n# ------------------------------\n# Get parameters\n# ------------------------------",
"detail":"main_flta",
"documentation":{}
},
{
"label":"get_parameters",
"kind":2,
"importPath":"main_flta",
"description":"main_flta",
"peekOfCode":"def get_parameters(model) -> NDArrays:\n\"\"\"\n Return model parameters as a list of NumPy ndarrays\n\"\"\"\n return [val.cpu().numpy() for _, val in model.state_dict().items()]\n# ------------------------------\n# Set parameters\n# ------------------------------\ndef set_parameters(model, parameters: NDArrays) -> None:\n\"\"\"",
"detail":"main_flta",
"documentation":{}
},
{
"label":"set_parameters",
"kind":2,
"importPath":"main_flta",
"description":"main_flta",
"peekOfCode":"def set_parameters(model, parameters: NDArrays) -> None:\n\"\"\"\n Set model parameters from a list of NumPy ndarrays\n\"\"\"\n params_dict = zip(model.state_dict().keys(), parameters)\n state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})\n model.load_state_dict(state_dict, strict=True)\n return\n# ------------------------------\n# Implement Flower client",
"detail":"main_flta",
"documentation":{}
},
{
"label":"evaluate",
"kind":2,
"importPath":"main_flta",
"description":"main_flta",
"peekOfCode":"def evaluate(server_round: int,\n parameters: NDArrays,\n config: Dict[str, Scalar],\n ) -> Optional[Tuple[float, Dict[str, Scalar]]]:\n\"\"\"\n The evaluate function will be called by Flower after each server round\n\"\"\"\n # create folder\n save_dir = os.path.join(\"runs\", CONFIG[\"output_folder\"], \"global_model\")\n create_folder(save_dir)",
"detail":"main_flta",
"documentation":{}
},
{
"label":"client_fn",
"kind":2,
"importPath":"main_flta",
"description":"main_flta",
"peekOfCode":"def client_fn(cid: str) -> FlowerClient:\n\"\"\"\n Create a Flower client instance. Each client will\n be trained and validated on his own unique data\n\"\"\"\n # load pretrained model\n model = get_model(CONFIG[\"model\"], CONFIG[\"num_classes\"])\n # specify output folder\n root_dir = os.path.join(\"runs\", CONFIG[\"output_folder\"])\n save_dir = os.path.join(root_dir, f\"client_{int(cid)+1}\")",
"detail":"main_flta",
"documentation":{}
},
{
"label":"fit_config",
"kind":2,
"importPath":"main_flta",
"description":"main_flta",
"peekOfCode":"def fit_config(server_round: int) -> Dict[str, Scalar]:\n\"\"\"\n Return training configuration dict for each round\n\"\"\"\n config = {\n\"server_round\": server_round,\n\"epochs\": CONFIG[\"epochs\"],\n\"optimizer\": CONFIG[\"optimizer\"],\n\"momentum\": CONFIG[\"momentum\"],\n\"lr\": CONFIG[\"lr\"],",
"peekOfCode":"class PlainFedAvg(fl.server.strategy.FedAvg):\n def aggregate_fit(self,\n server_round: int,\n results: List[Tuple[ClientProxy, FitRes]],\n failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],\n ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:\n\"\"\"\n Aggregate fit results using plain average\n\"\"\"\n if not results:",
"detail":"strategy",
"documentation":{}
},
{
"label":"aggregate_plain_average",
"kind":2,
"importPath":"strategy",
"description":"strategy",
"peekOfCode":"def aggregate_plain_average(results: List[Tuple[NDArrays, int]]) -> NDArrays:\n\"\"\"\n Compute plain average\n\"\"\"\n weights_list = [weights for weights, _ in results]\n # Compute average weights of each layer\n weights_prime: NDArrays = [\n reduce(np.add, layer_updates) / len(weights_list)\n for layer_updates in zip(*weights_list)\n ]",
"detail":"strategy",
"documentation":{}
},
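PlainFedAvg overrides aggregate_fit to use aggregate_plain_average, which, unlike Flower's default FedAvg aggregation, gives every client equal weight and ignores the per-client example counts. A self-contained completion of the helper from the peek, with a two-client check:

```python
from functools import reduce
from typing import List, Tuple

import numpy as np


def aggregate_plain_average(results: List[Tuple[List[np.ndarray], int]]) -> List[np.ndarray]:
    """Average each layer across clients, ignoring per-client example counts."""
    weights_list = [weights for weights, _ in results]
    return [
        reduce(np.add, layer_updates) / len(weights_list)
        for layer_updates in zip(*weights_list)
    ]


# Two clients, one layer each: the example counts (10 vs 30) play no role
a = ([np.array([0.0, 2.0])], 10)
b = ([np.array([2.0, 4.0])], 30)
print(aggregate_plain_average([a, b]))  # [array([1., 3.])]
```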
{
"label":"read_config",
"kind":2,
"importPath":"test_global_weight",
"description":"test_global_weight",
"peekOfCode":"def read_config(config_file):\n with open(config_file, 'r') as f:\n config = yaml.safe_load(f)\n return config\n# Function to set up model architecture with configurable last layers\ndef setup_model(num_classes, model_name):\n model_classes = {\n\"EfficientNet\": efficientnet_v2_s,\n\"VGG\": vgg19,\n\"ResNet\": resnet50,",
"detail":"test_global_weight",
"documentation":{}
},
{
"label":"setup_model",
"kind":2,
"importPath":"test_global_weight",
"description":"test_global_weight",
"peekOfCode":"def setup_model(num_classes, model_name):\n model_classes = {\n\"EfficientNet\": efficientnet_v2_s,\n\"VGG\": vgg19,\n\"ResNet\": resnet50,\n\"InceptionNet\": inception_v3,\n\"DenseNet\": densenet121\n }\n model = model_classes[model_name](pretrained=False)\n if model_name in [\"VGG\", \"EfficientNet\"]:",
"detail":"test_global_weight",
"documentation":{}
},
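setup_model builds a torchvision backbone and swaps its classification head; the attribute holding the head differs per family (classifier for VGG/EfficientNet/DenseNet, fc for ResNet/Inception). A sketch of the head-swap for two of the families; torchvision's newer weights=None is used here in place of the deprecated pretrained=False seen in the peek:

```python
import torch.nn as nn
from torchvision.models import resnet50, vgg19

num_classes = 2

# VGG-style: the head is the last layer of the .classifier Sequential
vgg = vgg19(weights=None)
vgg.classifier[-1] = nn.Linear(vgg.classifier[-1].in_features, num_classes)

# ResNet-style: the head is the single .fc layer
resnet = resnet50(weights=None)
resnet.fc = nn.Linear(resnet.fc.in_features, num_classes)
```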
{
"label":"load_weights",
"kind":2,
"importPath":"test_global_weight",
"description":"test_global_weight",
"peekOfCode":"def load_weights(model, weights_path):\n loaded_model = torch.jit.load(weights_path) # Load the TorchScript model\n model.load_state_dict(loaded_model.state_dict()) # Copy the parameters from the loaded model to the provided model\n model.eval() # Set the model to evaluation mode\nclass_map_str_to_idx = {\n\"No_windshield\": 0,\n\"With_windshield\": 1\n}\n# Function to test the model\ndef test_model(model, test_loader):",
"detail":"test_global_weight",
"documentation":{}
},
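load_weights reopens the TorchScript checkpoint and copies its state_dict into a freshly constructed eager model, so the weights can be used outside TorchScript; the final eval() switches off dropout and batch-norm updates. A minimal round trip with a stand-in architecture:

```python
import torch
import torch.nn as nn

# Build the same architecture the weights were trained with (a stand-in here)
model = nn.Sequential(nn.Linear(4, 2))
torch.jit.script(model).save("best.pt")    # pretend this is the saved checkpoint

fresh = nn.Sequential(nn.Linear(4, 2))
fresh.load_state_dict(torch.jit.load("best.pt").state_dict())  # copy parameters over
fresh.eval()                                                   # inference mode
```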
{
"label":"test_model",
"kind":2,
"importPath":"test_global_weight",
"description":"test_global_weight",
"peekOfCode":"def test_model(model, test_loader):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model.to(device)\n criterion = nn.CrossEntropyLoss()\n all_predictions = []\n all_targets = []\n all_confidences = [] # Store confidences\n with torch.no_grad(), tqdm(total=len(test_loader), desc=\"Running tests\") as pbar:\n for images, labels in test_loader:\n images, labels = images.to(device), labels.to(device)",
"detail":"test_global_weight",
"documentation":{}
},
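test_model tracks a confidence per prediction alongside the labels; the peek cuts off before that computation, but the usual pattern (an assumption here, not confirmed by the excerpt) is a softmax over the logits followed by a per-row max:

```python
import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 0.5],    # strongly class 0
                       [0.1, 1.4]])   # moderately class 1
probs = F.softmax(logits, dim=1)                    # per-class probabilities
confidences, predictions = torch.max(probs, dim=1)  # winning class and its probability
print(predictions.tolist())                         # [0, 1]
print([round(c, 3) for c in confidences.tolist()])  # [0.818, 0.786]
```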
{
"label":"save_results",
"kind":2,
"importPath":"test_global_weight",
"description":"test_global_weight",
"peekOfCode":"def save_results(output_folder, all_predictions, all_targets, metrics, config, test_dataset, all_confidences):\n # Create output folder if it doesn't exist\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n # Create folders based on class mapping\n for class_name in class_map_str_to_idx.keys():\n class_folder = os.path.join(output_folder, class_name)\n os.makedirs(class_folder, exist_ok=True)\n # Move images to respective class folders based on predictions\n for image_path, prediction, target, confidence in zip(test_dataset.imgs, all_predictions, all_targets, all_confidences):",
"detail":"test_global_weight",
"documentation":{}
},
{
"label":"main",
"kind":2,
"importPath":"test_global_weight",
"description":"test_global_weight",
"peekOfCode":"def main(config_file):\n # Read configuration\n config = read_config(config_file)\n # Set up output folder\n output_folder = config['output_folder']\n if os.path.exists(output_folder):\n print(\"Output folder already exists. Exiting.\")\n return\n # Set hyperparameters\n num_classes = config['num_classes']",
"detail":"test_global_weight",
"documentation":{}
},
{
"label":"class_map_str_to_idx",
"kind":5,
"importPath":"test_global_weight",
"description":"test_global_weight",
"peekOfCode":"class_map_str_to_idx = {\n\"No_windshield\": 0,\n\"With_windshield\": 1\n}\n# Function to test the model\ndef test_model(model, test_loader):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model.to(device)\n criterion = nn.CrossEntropyLoss()\n all_predictions = []",
"peekOfCode":"save_dir = os.path.join(\"runs\", CONFIG[\"output_folder\"])\n# check if folder already exists\nif os.path.exists(save_dir):\n print(\"Output folder already exists. Exiting the program.\")\n sys.exit()\n# create folder\ncreate_folder(save_dir)\nprint(\"Output folder created.\")\n# move copy of config file to output folder\nshutil.copy(config_file_name, os.path.join(save_dir, config_file_name))",
"detail":"train_and_test",
"documentation":{}
},
{
"label":"cid",
"kind":5,
"importPath":"train_and_test",
"description":"train_and_test",
"peekOfCode":"cid = CONFIG[\"client_id\"]\n# set seeds\nset_seeds(CONFIG[\"seed\"])\nprint(\"Seeds set.\")\n# load pretrained model\nmodel = get_model(CONFIG[\"model\"], CONFIG[\"num_classes\"])\nprint(\"Pretrained model loaded.\")\n# load data\nprint(\"Loading training and validation data...\")\ntrain_dataloader, val_dataloader = load_train_val_data(",
"detail":"train_and_test",
"documentation":{}
},
{
"label":"model",
"kind":5,
"importPath":"train_and_test",
"description":"train_and_test",
"peekOfCode":"model = get_model(CONFIG[\"model\"], CONFIG[\"num_classes\"])\nprint(\"Pretrained model loaded.\")\n# load data\nprint(\"Loading training and validation data...\")\ntrain_dataloader, val_dataloader = load_train_val_data(\n os.path.join(\"dataset/ESAIM\", f\"Client{int(cid)}\"),\n CONFIG[\"batch_size\"],\n CONFIG[\"img_size\"],\n)\nprint(\"Training and validation data loaded.\")",
"detail":"train_and_test",
"documentation":{}
},
{
"label":"test_dataloader",
"kind":5,
"importPath":"train_and_test",
"description":"train_and_test",
"peekOfCode":"test_dataloader = load_test_data(\n\"dataset/ESAIM/global_test_set\",\n CONFIG[\"batch_size\"],\n CONFIG[\"img_size\"],\n)\nprint(\"Test data loaded.\")\n# save one batch of training data\nimgs, _ = next(iter(train_dataloader))\ntrain_batch0 = make_grid(imgs, nrow=4, scale_each=True)\nsave_image(train_batch0, os.path.join(save_dir, \"train_batch0\"), \"png\")",
"detail":"train_and_test",
"documentation":{}
},
{
"label":"train_batch0",
"kind":5,
"importPath":"train_and_test",
"description":"train_and_test",
"peekOfCode":"train_batch0 = make_grid(imgs, nrow=4, scale_each=True)\nsave_image(train_batch0, os.path.join(save_dir, \"train_batch0\"), \"png\")\nprint(\"One batch of training data saved as image.\")\n# specify optimizer\noptimizer = get_optimizer(model, CONFIG[\"optimizer\"])\nprint(\"Optimizer specified.\")\n# specify loss function\ncriterion = get_criterion(CONFIG[\"loss_fcn\"])\nprint(\"Loss function specified.\")\n# specify device",
"detail":"train_and_test",
"documentation":{}
},
{
"label":"optimizer",
"kind":5,
"importPath":"train_and_test",
"description":"train_and_test",
"peekOfCode":"optimizer = get_optimizer(model, CONFIG[\"optimizer\"])\nprint(\"Optimizer specified.\")\n# specify loss function\ncriterion = get_criterion(CONFIG[\"loss_fcn\"])\nprint(\"Loss function specified.\")\n# specify device\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"Device in use: {device}\")\n# train model\nprint(\"Training the model...\")",
"detail":"train_and_test",
"documentation":{}
},
{
"label":"criterion",
"kind":5,
"importPath":"train_and_test",
"description":"train_and_test",
"peekOfCode":"criterion = get_criterion(CONFIG[\"loss_fcn\"])\nprint(\"Loss function specified.\")\n# specify device\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"Device in use: {device}\")\n# train model\nprint(\"Training the model...\")\nmetrics_dict = train_model(\n model,\n train_dataloader,",
"detail":"train_and_test",
"documentation":{}
},
{
"label":"device",
"kind":5,
"importPath":"train_and_test",
"description":"train_and_test",
"peekOfCode":"device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"Device in use: {device}\")\n# train model\nprint(\"Training the model...\")\nmetrics_dict = train_model(\n model,\n train_dataloader,\n val_dataloader,\n optimizer,\n CONFIG[\"epochs\"],",
"peekOfCode":"best_epoch_weights_path = os.path.join(save_dir, \"best.pt\")\nif os.path.exists(best_epoch_weights_path):\n print(\"Loading best epoch weights...\")\n model.load_state_dict(torch.jit.load(best_epoch_weights_path).state_dict())\n print(\"Best epoch weights loaded successfully.\")\nelse:\n print(\"Best epoch weights not found. Using final weights for testing.\")\n# test model\nprint(\"Testing the model...\")\ntest_acc, test_loss, confusion_matrix = test_model(",
"detail":"train_and_test",
"documentation":{}
},
{
"label":"num_epochs_completed",
"kind":5,
"importPath":"train_and_test",
"description":"train_and_test",
"peekOfCode":"num_epochs_completed = len(metrics_dict.get(\"train_loss\", []))\n# Add test metrics to dict and pad the lists\nmetrics_dict[\"test_acc\"] = [0] * (num_epochs_completed - 1) + [test_acc]\nmetrics_dict[\"test_loss\"] = [0] * (num_epochs_completed - 1) + [test_loss]\nmetrics_dict[\"precision\"] = [0] * (num_epochs_completed - 1) + [average_Precision]\nmetrics_dict[\"recall\"] = [0] * (num_epochs_completed - 1) + [average_Recall]\nmetrics_dict[\"f1\"] = [0] * (num_epochs_completed - 1) + [average_F1]\n# write training and validation metrics to .txt file\nprint(\"Saving metrics to a file...\")\nsave_metrics(metrics_dict, \"epoch\", os.path.join(save_dir, \"results.txt\"))",
"detail":"train_and_test",
"documentation":{}
},
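The padding scheme below keeps every metric list exactly num_epochs_completed entries long, so the one-off test metrics line up with the final epoch row when save_metrics writes one row per epoch. A tiny illustration:

```python
# Pad single test-time metrics so they align with the last epoch row
metrics_dict = {"train_loss": [0.9, 0.6, 0.4]}          # three epochs completed
num_epochs_completed = len(metrics_dict["train_loss"])

test_acc = 0.87
metrics_dict["test_acc"] = [0] * (num_epochs_completed - 1) + [test_acc]
print(metrics_dict["test_acc"])                          # [0, 0, 0.87]
```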
{
"label":"metrics_dict[\"test_acc\"]",
"kind":5,
"importPath":"train_and_test",
"description":"train_and_test",
"peekOfCode":"metrics_dict[\"test_acc\"] = [0] * (num_epochs_completed - 1) + [test_acc]\nmetrics_dict[\"test_loss\"] = [0] * (num_epochs_completed - 1) + [test_loss]\nmetrics_dict[\"precision\"] = [0] * (num_epochs_completed - 1) + [average_Precision]\nmetrics_dict[\"recall\"] = [0] * (num_epochs_completed - 1) + [average_Recall]\nmetrics_dict[\"f1\"] = [0] * (num_epochs_completed - 1) + [average_F1]\n# write training and validation metrics to .txt file\nprint(\"Saving metrics to a file...\")\nsave_metrics(metrics_dict, \"epoch\", os.path.join(save_dir, \"results.txt\"))\nprint(\"Metrics saved to results.txt in the output folder.\")\nprint(\"\\nResults are stored in:\", save_dir)",
"detail":"train_and_test",
"documentation":{}
},
{
"label":"metrics_dict[\"test_loss\"]",
"kind":5,
"importPath":"train_and_test",
"description":"train_and_test",
"peekOfCode":"metrics_dict[\"test_loss\"] = [0] * (num_epochs_completed - 1) + [test_loss]\nmetrics_dict[\"precision\"] = [0] * (num_epochs_completed - 1) + [average_Precision]\nmetrics_dict[\"recall\"] = [0] * (num_epochs_completed - 1) + [average_Recall]\nmetrics_dict[\"f1\"] = [0] * (num_epochs_completed - 1) + [average_F1]\n# write training and validation metrics to .txt file\nprint(\"Saving metrics to a file...\")\nsave_metrics(metrics_dict, \"epoch\", os.path.join(save_dir, \"results.txt\"))\nprint(\"Metrics saved to results.txt in the output folder.\")\nprint(\"\\nResults are stored in:\", save_dir)",
"detail":"train_and_test",
"documentation":{}
},
{
"label":"metrics_dict[\"precision\"]",
"kind":5,
"importPath":"train_and_test",
"description":"train_and_test",
"peekOfCode":"metrics_dict[\"precision\"] = [0] * (num_epochs_completed - 1) + [average_Precision]\nmetrics_dict[\"recall\"] = [0] * (num_epochs_completed - 1) + [average_Recall]\nmetrics_dict[\"f1\"] = [0] * (num_epochs_completed - 1) + [average_F1]\n# write training and validation metrics to .txt file\nprint(\"Saving metrics to a file...\")\nsave_metrics(metrics_dict, \"epoch\", os.path.join(save_dir, \"results.txt\"))\nprint(\"Metrics saved to results.txt in the output folder.\")\nprint(\"\\nResults are stored in:\", save_dir)",
"detail":"train_and_test",
"documentation":{}
},
{
"label":"metrics_dict[\"recall\"]",
"kind":5,
"importPath":"train_and_test",
"description":"train_and_test",
"peekOfCode":"metrics_dict[\"recall\"] = [0] * (num_epochs_completed - 1) + [average_Recall]\nmetrics_dict[\"f1\"] = [0] * (num_epochs_completed - 1) + [average_F1]\n# write training and validation metrics to .txt file\nprint(\"Saving metrics to a file...\")\nsave_metrics(metrics_dict, \"epoch\", os.path.join(save_dir, \"results.txt\"))\nprint(\"Metrics saved to results.txt in the output folder.\")\nprint(\"\\nResults are stored in:\", save_dir)",
"detail":"train_and_test",
"documentation":{}
},
{
"label":"metrics_dict[\"f1\"]",
"kind":5,
"importPath":"train_and_test",
"description":"train_and_test",
"peekOfCode":"metrics_dict[\"f1\"] = [0] * (num_epochs_completed - 1) + [average_F1]\n# write training and validation metrics to .txt file\nprint(\"Saving metrics to a file...\")\nsave_metrics(metrics_dict, \"epoch\", os.path.join(save_dir, \"results.txt\"))\nprint(\"Metrics saved to results.txt in the output folder.\")\nprint(\"\\nResults are stored in:\", save_dir)",