Skip to content
Snippets Groups Projects
Commit 0a590f83 authored by Dalim's avatar Dalim
Browse files

add index and hidden file .gitlab-ci.yml for webpage mermaid

parent c2d36a15
Branches main
No related tags found
No related merge requests found
Pipeline #148290 passed
image: node:latest
pages:
script:
- mkdir .public
- cp index.html .public/
artifacts:
paths:
- .public
\ No newline at end of file
[
{
"label": "zipfile",
"kind": 6,
"isExtraImport": true,
"importPath": "zipfile",
"description": "zipfile",
"detail": "zipfile",
"documentation": {}
},
{
"label": "yaml",
"kind": 6,
"isExtraImport": true,
"importPath": "yaml",
"description": "yaml",
"detail": "yaml",
"documentation": {}
},
{
"label": "OrderedDict",
"importPath": "collections",
"description": "collections",
"isExtraImport": true,
"detail": "collections",
"documentation": {}
},
{
"label": "OrderedDict",
"importPath": "collections",
"description": "collections",
"isExtraImport": true,
"detail": "collections",
"documentation": {}
},
{
"label": "OrderedDict",
"importPath": "collections",
"description": "collections",
"isExtraImport": true,
"detail": "collections",
"documentation": {}
},
{
"label": "Dict",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "List",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "Optional",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "Tuple",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "Union",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "Dict",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "List",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "Optional",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "Tuple",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "Union",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "Dict",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "List",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "Optional",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "Tuple",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "Union",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "Dict",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "List",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "Optional",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "Tuple",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "Union",
"importPath": "typing",
"description": "typing",
"isExtraImport": true,
"detail": "typing",
"documentation": {}
},
{
"label": "os",
"kind": 6,
"isExtraImport": true,
"importPath": "os",
"description": "os",
"detail": "os",
"documentation": {}
},
{
"label": "sys",
"kind": 6,
"isExtraImport": true,
"importPath": "sys",
"description": "sys",
"detail": "sys",
"documentation": {}
},
{
"label": "glob",
"kind": 6,
"isExtraImport": true,
"importPath": "glob",
"description": "glob",
"detail": "glob",
"documentation": {}
},
{
"label": "shutil",
"kind": 6,
"isExtraImport": true,
"importPath": "shutil",
"description": "shutil",
"detail": "shutil",
"documentation": {}
},
{
"label": "deepcopy",
"importPath": "copy",
"description": "copy",
"isExtraImport": true,
"detail": "copy",
"documentation": {}
},
{
"label": "deepcopy",
"importPath": "copy",
"description": "copy",
"isExtraImport": true,
"detail": "copy",
"documentation": {}
},
{
"label": "deepcopy",
"importPath": "copy",
"description": "copy",
"isExtraImport": true,
"detail": "copy",
"documentation": {}
},
{
"label": "numpy",
"kind": 6,
"isExtraImport": true,
"importPath": "numpy",
"description": "numpy",
"detail": "numpy",
"documentation": {}
},
{
"label": "random",
"kind": 6,
"isExtraImport": true,
"importPath": "random",
"description": "random",
"detail": "random",
"documentation": {}
},
{
"label": "torch",
"kind": 6,
"isExtraImport": true,
"importPath": "torch",
"description": "torch",
"detail": "torch",
"documentation": {}
},
{
"label": "torch.nn",
"kind": 6,
"isExtraImport": true,
"importPath": "torch.nn",
"description": "torch.nn",
"detail": "torch.nn",
"documentation": {}
},
{
"label": "lr_scheduler",
"importPath": "torch.optim",
"description": "torch.optim",
"isExtraImport": true,
"detail": "torch.optim",
"documentation": {}
},
{
"label": "lr_scheduler",
"importPath": "torch.optim",
"description": "torch.optim",
"isExtraImport": true,
"detail": "torch.optim",
"documentation": {}
},
{
"label": "lr_scheduler",
"importPath": "torch.optim",
"description": "torch.optim",
"isExtraImport": true,
"detail": "torch.optim",
"documentation": {}
},
{
"label": "Dataset",
"importPath": "torch.utils.data",
"description": "torch.utils.data",
"isExtraImport": true,
"detail": "torch.utils.data",
"documentation": {}
},
{
"label": "Dataset",
"importPath": "torch.utils.data",
"description": "torch.utils.data",
"isExtraImport": true,
"detail": "torch.utils.data",
"documentation": {}
},
{
"label": "Dataset",
"importPath": "torch.utils.data",
"description": "torch.utils.data",
"isExtraImport": true,
"detail": "torch.utils.data",
"documentation": {}
},
{
"label": "DataLoader",
"importPath": "torch.utils.data",
"description": "torch.utils.data",
"isExtraImport": true,
"detail": "torch.utils.data",
"documentation": {}
},
{
"label": "torchvision",
"kind": 6,
"isExtraImport": true,
"importPath": "torchvision",
"description": "torchvision",
"detail": "torchvision",
"documentation": {}
},
{
"label": "efficientnet_v2_s",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "vgg19",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "resnet50",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "inception_v3",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "densenet121",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "efficientnet_v2_s",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "vgg19",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "resnet50",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "inception_v3",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "densenet121",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "efficientnet_v2_s",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "vgg19",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "resnet50",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "inception_v3",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "densenet121",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "efficientnet_v2_s",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "vgg19",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "resnet50",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "inception_v3",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "densenet121",
"importPath": "torchvision.models",
"description": "torchvision.models",
"isExtraImport": true,
"detail": "torchvision.models",
"documentation": {}
},
{
"label": "make_grid",
"importPath": "torchvision.utils",
"description": "torchvision.utils",
"isExtraImport": true,
"detail": "torchvision.utils",
"documentation": {}
},
{
"label": "save_image",
"importPath": "torchvision.utils",
"description": "torchvision.utils",
"isExtraImport": true,
"detail": "torchvision.utils",
"documentation": {}
},
{
"label": "make_grid",
"importPath": "torchvision.utils",
"description": "torchvision.utils",
"isExtraImport": true,
"detail": "torchvision.utils",
"documentation": {}
},
{
"label": "save_image",
"importPath": "torchvision.utils",
"description": "torchvision.utils",
"isExtraImport": true,
"detail": "torchvision.utils",
"documentation": {}
},
{
"label": "make_grid",
"importPath": "torchvision.utils",
"description": "torchvision.utils",
"isExtraImport": true,
"detail": "torchvision.utils",
"documentation": {}
},
{
"label": "save_image",
"importPath": "torchvision.utils",
"description": "torchvision.utils",
"isExtraImport": true,
"detail": "torchvision.utils",
"documentation": {}
},
{
"label": "make_grid",
"importPath": "torchvision.utils",
"description": "torchvision.utils",
"isExtraImport": true,
"detail": "torchvision.utils",
"documentation": {}
},
{
"label": "save_image",
"importPath": "torchvision.utils",
"description": "torchvision.utils",
"isExtraImport": true,
"detail": "torchvision.utils",
"documentation": {}
},
{
"label": "cv2",
"kind": 6,
"isExtraImport": true,
"importPath": "cv2",
"description": "cv2",
"detail": "cv2",
"documentation": {}
},
{
"label": "flwr",
"kind": 6,
"isExtraImport": true,
"importPath": "flwr",
"description": "flwr",
"detail": "flwr",
"documentation": {}
},
{
"label": "Code",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "EvaluateIns",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "EvaluateRes",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "FitIns",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "FitRes",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "GetParametersIns",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "GetParametersRes",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "Status",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "NDArrays",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "Parameters",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "Scalar",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "ndarrays_to_parameters",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "parameters_to_ndarrays",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "Code",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "EvaluateIns",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "EvaluateRes",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "FitIns",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "FitRes",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "GetParametersIns",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "GetParametersRes",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "Status",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "NDArrays",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "Parameters",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "Scalar",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "ndarrays_to_parameters",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "parameters_to_ndarrays",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "Code",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "EvaluateIns",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "EvaluateRes",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "FitIns",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "FitRes",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "GetParametersIns",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "GetParametersRes",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "Status",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "NDArrays",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "Parameters",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "Scalar",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "ndarrays_to_parameters",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "parameters_to_ndarrays",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "FitRes",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "NDArrays",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "Parameters",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "Scalar",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "ndarrays_to_parameters",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "parameters_to_ndarrays",
"importPath": "flwr.common",
"description": "flwr.common",
"isExtraImport": true,
"detail": "flwr.common",
"documentation": {}
},
{
"label": "FedAvg",
"importPath": "flwr.server.strategy",
"description": "flwr.server.strategy",
"isExtraImport": true,
"detail": "flwr.server.strategy",
"documentation": {}
},
{
"label": "FedAvg",
"importPath": "flwr.server.strategy",
"description": "flwr.server.strategy",
"isExtraImport": true,
"detail": "flwr.server.strategy",
"documentation": {}
},
{
"label": "FedAvg",
"importPath": "flwr.server.strategy",
"description": "flwr.server.strategy",
"isExtraImport": true,
"detail": "flwr.server.strategy",
"documentation": {}
},
{
"label": "PlainFedAvg",
"importPath": "strategy",
"description": "strategy",
"isExtraImport": true,
"detail": "strategy",
"documentation": {}
},
{
"label": "PlainFedAvg",
"importPath": "strategy",
"description": "strategy",
"isExtraImport": true,
"detail": "strategy",
"documentation": {}
},
{
"label": "PlainFedAvg",
"importPath": "strategy",
"description": "strategy",
"isExtraImport": true,
"detail": "strategy",
"documentation": {}
},
{
"label": "WARNING",
"importPath": "logging",
"description": "logging",
"isExtraImport": true,
"detail": "logging",
"documentation": {}
},
{
"label": "log",
"importPath": "flwr.common.logger",
"description": "flwr.common.logger",
"isExtraImport": true,
"detail": "flwr.common.logger",
"documentation": {}
},
{
"label": "ClientProxy",
"importPath": "flwr.server.client_proxy",
"description": "flwr.server.client_proxy",
"isExtraImport": true,
"detail": "flwr.server.client_proxy",
"documentation": {}
},
{
"label": "reduce",
"importPath": "functools",
"description": "functools",
"isExtraImport": true,
"detail": "functools",
"documentation": {}
},
{
"label": "torchvision.transforms",
"kind": 6,
"isExtraImport": true,
"importPath": "torchvision.transforms",
"description": "torchvision.transforms",
"detail": "torchvision.transforms",
"documentation": {}
},
{
"label": "ImageFolder",
"importPath": "torchvision.datasets",
"description": "torchvision.datasets",
"isExtraImport": true,
"detail": "torchvision.datasets",
"documentation": {}
},
{
"label": "accuracy_score",
"importPath": "sklearn.metrics",
"description": "sklearn.metrics",
"isExtraImport": true,
"detail": "sklearn.metrics",
"documentation": {}
},
{
"label": "precision_score",
"importPath": "sklearn.metrics",
"description": "sklearn.metrics",
"isExtraImport": true,
"detail": "sklearn.metrics",
"documentation": {}
},
{
"label": "recall_score",
"importPath": "sklearn.metrics",
"description": "sklearn.metrics",
"isExtraImport": true,
"detail": "sklearn.metrics",
"documentation": {}
},
{
"label": "f1_score",
"importPath": "sklearn.metrics",
"description": "sklearn.metrics",
"isExtraImport": true,
"detail": "sklearn.metrics",
"documentation": {}
},
{
"label": "confusion_matrix",
"importPath": "sklearn.metrics",
"description": "sklearn.metrics",
"isExtraImport": true,
"detail": "sklearn.metrics",
"documentation": {}
},
{
"label": "accuracy_score",
"importPath": "sklearn.metrics",
"description": "sklearn.metrics",
"isExtraImport": true,
"detail": "sklearn.metrics",
"documentation": {}
},
{
"label": "precision_score",
"importPath": "sklearn.metrics",
"description": "sklearn.metrics",
"isExtraImport": true,
"detail": "sklearn.metrics",
"documentation": {}
},
{
"label": "recall_score",
"importPath": "sklearn.metrics",
"description": "sklearn.metrics",
"isExtraImport": true,
"detail": "sklearn.metrics",
"documentation": {}
},
{
"label": "f1_score",
"importPath": "sklearn.metrics",
"description": "sklearn.metrics",
"isExtraImport": true,
"detail": "sklearn.metrics",
"documentation": {}
},
{
"label": "confusion_matrix",
"importPath": "sklearn.metrics",
"description": "sklearn.metrics",
"isExtraImport": true,
"detail": "sklearn.metrics",
"documentation": {}
},
{
"label": "matplotlib.pyplot",
"kind": 6,
"isExtraImport": true,
"importPath": "matplotlib.pyplot",
"description": "matplotlib.pyplot",
"detail": "matplotlib.pyplot",
"documentation": {}
},
{
"label": "tqdm",
"importPath": "tqdm",
"description": "tqdm",
"isExtraImport": true,
"detail": "tqdm",
"documentation": {}
},
{
"label": "tqdm",
"importPath": "tqdm",
"description": "tqdm",
"isExtraImport": true,
"detail": "tqdm",
"documentation": {}
},
{
"label": "torch.nn.functional",
"kind": 6,
"isExtraImport": true,
"importPath": "torch.nn.functional",
"description": "torch.nn.functional",
"detail": "torch.nn.functional",
"documentation": {}
},
{
"label": "seaborn",
"kind": 6,
"isExtraImport": true,
"importPath": "seaborn",
"description": "seaborn",
"detail": "seaborn",
"documentation": {}
},
{
"label": "Image",
"importPath": "PIL",
"description": "PIL",
"isExtraImport": true,
"detail": "PIL",
"documentation": {}
},
{
"label": "ImageDraw",
"importPath": "PIL",
"description": "PIL",
"isExtraImport": true,
"detail": "PIL",
"documentation": {}
},
{
"label": "ImageFont",
"importPath": "PIL",
"description": "PIL",
"isExtraImport": true,
"detail": "PIL",
"documentation": {}
},
{
"label": "#change the from main to the file you used to train your federated dataset\n set_seeds",
"importPath": "main_esaim",
"description": "main_esaim",
"isExtraImport": true,
"detail": "main_esaim",
"documentation": {}
},
{
"label": "create_folder",
"importPath": "main_esaim",
"description": "main_esaim",
"isExtraImport": true,
"detail": "main_esaim",
"documentation": {}
},
{
"label": "get_model",
"importPath": "main_esaim",
"description": "main_esaim",
"isExtraImport": true,
"detail": "main_esaim",
"documentation": {}
},
{
"label": "get_criterion",
"importPath": "main_esaim",
"description": "main_esaim",
"isExtraImport": true,
"detail": "main_esaim",
"documentation": {}
},
{
"label": "get_optimizer",
"importPath": "main_esaim",
"description": "main_esaim",
"isExtraImport": true,
"detail": "main_esaim",
"documentation": {}
},
{
"label": "load_train_val_data",
"importPath": "main_esaim",
"description": "main_esaim",
"isExtraImport": true,
"detail": "main_esaim",
"documentation": {}
},
{
"label": "load_test_data",
"importPath": "main_esaim",
"description": "main_esaim",
"isExtraImport": true,
"detail": "main_esaim",
"documentation": {}
},
{
"label": "train_model",
"importPath": "main_esaim",
"description": "main_esaim",
"isExtraImport": true,
"detail": "main_esaim",
"documentation": {}
},
{
"label": "test_model",
"importPath": "main_esaim",
"description": "main_esaim",
"isExtraImport": true,
"detail": "main_esaim",
"documentation": {}
},
{
"label": "calculate_metrics",
"importPath": "main_esaim",
"description": "main_esaim",
"isExtraImport": true,
"detail": "main_esaim",
"documentation": {}
},
{
"label": "save_metrics",
"importPath": "main_esaim",
"description": "main_esaim",
"isExtraImport": true,
"detail": "main_esaim",
"documentation": {}
},
{
"label": "Dataloader",
"kind": 6,
"importPath": "main",
"description": "main",
"peekOfCode": "class Dataloader(Dataset):\n def __init__(self, root_dir, loader_mode, imgsz):\n self.root_dir = root_dir\n self.loader_mode = loader_mode\n if self.loader_mode == \"test\":\n self.imgs = [elem for elem in glob.glob(os.path.join(root_dir, \"*\", \"*\"))]\n else:\n self.imgs = [elem for elem in glob.glob(os.path.join(root_dir, self.loader_mode, \"*\", \"*\"))]\n self.imgs_and_lbls = []\n for path in self.imgs:",
"detail": "main",
"documentation": {}
},
{
"label": "FlowerClient",
"kind": 6,
"importPath": "main",
"description": "main",
"peekOfCode": "class FlowerClient(fl.client.Client):\n \"\"\"\n Flower client using PyTorch\n \"\"\"\n def __init__(self, cid, model, train_dataloader, val_dataloader, device, root_dir, save_dir):\n self.cid = cid\n self.model = model\n self.train_dataloader = train_dataloader\n self.val_dataloader = val_dataloader\n self.device = device",
"detail": "main",
"documentation": {}
},
{
"label": "extract_zip",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def extract_zip(zip_file_name: str, save_dir: str):\n \"\"\"\n Extract .zip folder, if not already done\n \"\"\"\n parts = zip_file_name.split(\".\")\n file_name = parts[0]\n if not os.path.isdir(file_name):\n with zipfile.ZipFile(zip_file_name, \"r\") as zip_ref:\n zip_ref.extractall(save_dir)\n print(f\"Folder '{zip_file_name}' was successfully extracted\")",
"detail": "main",
"documentation": {}
},
{
"label": "set_seeds",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def set_seeds(seed):\n \"\"\"\n \"\"\"\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True",
"detail": "main",
"documentation": {}
},
{
"label": "load_train_val_data",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def load_train_val_data(root_dir, batch_size, imgsz):\n \"\"\"\n \"\"\"\n train_dataset = Dataloader(root_dir=root_dir, loader_mode=\"train\", imgsz=imgsz)\n val_dataset = Dataloader(root_dir=root_dir, loader_mode=\"val\", imgsz=imgsz)\n train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, pin_memory=True, shuffle=True)\n val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, pin_memory=True, shuffle=False)\n return train_dataloader, val_dataloader\ndef load_test_data(root_dir, batch_size, imgsz):\n \"\"\"",
"detail": "main",
"documentation": {}
},
{
"label": "load_test_data",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def load_test_data(root_dir, batch_size, imgsz):\n \"\"\"\n \"\"\"\n test_dataset = Dataloader(root_dir=root_dir, loader_mode=\"test\", imgsz=imgsz)\n test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, pin_memory=True, shuffle=False)\n return test_dataloader\n# ----------------------------------------\n# Train model\n# ----------------------------------------\ndef train_model(model,",
"detail": "main",
"documentation": {}
},
{
"label": "train_model",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def train_model(model,\n train_dataloader,\n val_dataloader,\n optimizer,\n num_epochs,\n criterion,\n device,\n use_best_weights,\n early_stopping,\n save_dir):",
"detail": "main",
"documentation": {}
},
{
"label": "test_model",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def test_model(model, dataloader, criterion, device, num_classes):\n \"\"\"\n Test model and calculate confusion matrix\n \"\"\"\n model.to(device)\n model.eval()\n confusion_matrix = np.zeros((num_classes, num_classes))\n correct = 0\n total = 0\n loss = 0.0",
"detail": "main",
"documentation": {}
},
{
"label": "calculate_metrics",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def calculate_metrics(confusion_matrix):\n \"\"\"\n \"\"\"\n num_classes = len(confusion_matrix)\n Recall_list = []\n Precision_list = []\n F1_list = []\n for i in range(num_classes):\n TP_cls_i = confusion_matrix[i,i]\n FP_cls_i = sum(confusion_matrix[i,:]) - TP_cls_i",
"detail": "main",
"documentation": {}
},
{
"label": "save_metrics",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def save_metrics(metrics: Dict[str, Scalar], header, filename: str):\n \"\"\"\n Write results to a text file\n \"\"\"\n keys, vals = list(metrics.keys()), np.array(list(metrics.values())).T\n num_cols = len(metrics) + 1\n num_rows = vals.shape[0]\n header = ((\"%23s,\" * num_cols % tuple([header] + keys)).rstrip(\",\"))\n with open(filename, \"w\") as f:\n f.write(header + \"\\n\")",
"detail": "main",
"documentation": {}
},
{
"label": "get_model",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def get_model(model_config: str, num_classes: int):\n \"\"\"\n \"\"\"\n if model_config == \"EfficientNet\":\n model = efficientnet_v2_s(weights=\"IMAGENET1K_V1\")\n in_features = model.classifier[-1].in_features\n model.classifier[-1] = torch.nn.Linear(in_features, num_classes)\n elif model_config == \"VGG\":\n model = vgg19(weights=\"IMAGENET1K_V1\")\n in_features = model.classifier[-1].in_features",
"detail": "main",
"documentation": {}
},
{
"label": "get_strategy",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def get_strategy(strategy_config: str):\n \"\"\"\n \"\"\"\n if strategy_config == \"FedAvg\":\n strategy = FedAvg(fraction_fit=1.0,\n fraction_evaluate=0.0,\n min_fit_clients=CONFIG[\"num_clients\"],\n min_evaluate_clients=CONFIG[\"num_clients\"],\n min_available_clients=CONFIG[\"num_clients\"],\n on_fit_config_fn=fit_config,",
"detail": "main",
"documentation": {}
},
{
"label": "get_optimizer",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def get_optimizer(model, optimizer_config: str):\n \"\"\"\n \"\"\"\n if optimizer_config == \"SGD\":\n optimizer = torch.optim.SGD(model.parameters(),\n lr=CONFIG[\"lr\"],\n momentum=CONFIG[\"momentum\"])\n elif optimizer_config == \"Adam\":\n optimizer = torch.optim.Adam(model.parameters(),\n lr=CONFIG[\"lr\"],",
"detail": "main",
"documentation": {}
},
{
"label": "get_criterion",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def get_criterion(criterion_config: str):\n \"\"\"\n \"\"\"\n if criterion_config == \"CrossEntropyLoss\":\n criterion = nn.CrossEntropyLoss()\n return criterion\n# ------------------------------\n# Create folder\n# ------------------------------\ndef create_folder(dir: str):",
"detail": "main",
"documentation": {}
},
{
"label": "create_folder",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def create_folder(dir: str):\n \"\"\"\n Check if folder already exists\n If not create new folder\n \"\"\"\n if not os.path.exists(dir):\n os.makedirs(dir)\n# ------------------------------\n# Save model as .pt\n# ------------------------------",
"detail": "main",
"documentation": {}
},
{
"label": "save_model",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def save_model(model, save_dir):\n \"\"\"\n Export model to TorchScript and save it\n \"\"\"\n if not os.path.exists(save_dir):\n model_scripted = torch.jit.script(model)\n model_scripted.save(save_dir)\n# ------------------------------\n# Get parameters\n# ------------------------------",
"detail": "main",
"documentation": {}
},
{
"label": "get_parameters",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def get_parameters(model) -> NDArrays:\n \"\"\"\n Return model parameters as a list of NumPy ndarrays\n \"\"\"\n return [val.cpu().numpy() for _, val in model.state_dict().items()]\n# ------------------------------\n# Set parameters\n# ------------------------------\ndef set_parameters(model, parameters: NDArrays) -> None:\n \"\"\"",
"detail": "main",
"documentation": {}
},
{
"label": "set_parameters",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def set_parameters(model, parameters: NDArrays) -> None:\n \"\"\"\n Set model parameters from a list of NumPy ndarrays\n \"\"\"\n params_dict = zip(model.state_dict().keys(), parameters)\n state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})\n model.load_state_dict(state_dict, strict=True)\n return\n# ------------------------------\n# Implement Flower client",
"detail": "main",
"documentation": {}
},
{
"label": "evaluate",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def evaluate(server_round: int,\n parameters: NDArrays,\n config: Dict[str, Scalar],\n ) -> Optional[Tuple[float, Dict[str, Scalar]]]:\n \"\"\"\n The evaluate function will be called by Flower after each server round\n \"\"\"\n # create folder\n save_dir = os.path.join(\"runs\", CONFIG[\"output_folder\"], \"global_model\")\n create_folder(save_dir)",
"detail": "main",
"documentation": {}
},
{
"label": "client_fn",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def client_fn(cid: str) -> FlowerClient:\n \"\"\"\n Create a Flower client instance. Each client will\n be trained and validated on his own unique data\n \"\"\"\n # load pretrained model\n model = get_model(CONFIG[\"model\"], CONFIG[\"num_classes\"])\n # specify output folder\n root_dir = os.path.join(\"runs\", CONFIG[\"output_folder\"])\n save_dir = os.path.join(root_dir, f\"client_{int(cid)+1}\")",
"detail": "main",
"documentation": {}
},
{
"label": "fit_config",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def fit_config(server_round: int) -> Dict[str, Scalar]:\n \"\"\"\n Return training configuration dict for each round\n \"\"\"\n config = {\n \"server_round\": server_round,\n \"epochs\": CONFIG[\"epochs\"],\n \"optimizer\": CONFIG[\"optimizer\"],\n \"momentum\": CONFIG[\"momentum\"],\n \"lr\": CONFIG[\"lr\"],",
"detail": "main",
"documentation": {}
},
{
"label": "extract_metrics",
"kind": 2,
"importPath": "main",
"description": "main",
"peekOfCode": "def extract_metrics(metrics: Dict[str, Scalar]) -> Dict[str, Scalar]:\n \"\"\"\n \"\"\"\n extracted_metrics = {\n \"test_loss\": [],\n \"test_acc\": [],\n \"precision\": [],\n \"recall\": [],\n \"f1\": [],\n }",
"detail": "main",
"documentation": {}
},
{
"label": "config_file_name",
"kind": 5,
"importPath": "main",
"description": "main",
"peekOfCode": "config_file_name = \"config.yaml\"\nwith open(config_file_name) as file:\n CONFIG = yaml.safe_load(file)\n# ------------------------------\n# Extract .zip folder\n# ------------------------------\ndef extract_zip(zip_file_name: str, save_dir: str):\n \"\"\"\n Extract .zip folder, if not already done\n \"\"\"",
"detail": "main",
"documentation": {}
},
{
"label": "class_map_str_to_idx",
"kind": 5,
"importPath": "main",
"description": "main",
"peekOfCode": "class_map_str_to_idx = {\n \"electric fan\": 0,\n \"gas pump\": 1,\n \"hourglass\": 2,\n \"knot\": 3,\n \"loudspeaker\": 4,\n \"pinwheel\": 5,\n \"screen\": 6,\n \"space heater\": 7,\n \"stopwatch\": 8,",
"detail": "main",
"documentation": {}
},
{
"label": "Dataloader",
"kind": 6,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "class Dataloader(Dataset):\n def __init__(self, root_dir, loader_mode, imgsz):\n self.root_dir = root_dir\n self.loader_mode = loader_mode\n if self.loader_mode == \"test\":\n self.imgs = [elem for elem in glob.glob(os.path.join(root_dir, \"*\", \"*\"))]\n else:\n self.imgs = [elem for elem in glob.glob(os.path.join(root_dir, self.loader_mode, \"*\", \"*\"))]\n self.imgs_and_lbls = []\n for path in self.imgs:",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "FlowerClient",
"kind": 6,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "class FlowerClient(fl.client.Client):\n \"\"\"\n Flower client using PyTorch\n \"\"\"\n def __init__(self, cid, model, train_dataloader, val_dataloader, device, root_dir, save_dir):\n self.cid = cid\n self.model = model\n self.train_dataloader = train_dataloader\n self.val_dataloader = val_dataloader\n self.device = device",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "extract_zip",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def extract_zip(zip_file_name: str, save_dir: str):\n \"\"\"\n Extract .zip folder, if not already done\n \"\"\"\n parts = zip_file_name.split(\".\")\n file_name = parts[0]\n if not os.path.isdir(file_name):\n with zipfile.ZipFile(zip_file_name, \"r\") as zip_ref:\n zip_ref.extractall(save_dir)\n print(f\"Folder '{zip_file_name}' was successfully extracted\")",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "set_seeds",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def set_seeds(seed):\n \"\"\"\n \"\"\"\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "load_train_val_data",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def load_train_val_data(root_dir, batch_size, imgsz):\n \"\"\"\n \"\"\"\n train_dataset = Dataloader(root_dir=root_dir, loader_mode=\"train\", imgsz=imgsz)\n val_dataset = Dataloader(root_dir=root_dir, loader_mode=\"val\", imgsz=imgsz)\n train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, pin_memory=True, shuffle=True)\n val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, pin_memory=True, shuffle=False)\n return train_dataloader, val_dataloader\ndef load_test_data(root_dir, batch_size, imgsz):\n \"\"\"",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "load_test_data",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def load_test_data(root_dir, batch_size, imgsz):\n \"\"\"\n \"\"\"\n test_dataset = Dataloader(root_dir=root_dir, loader_mode=\"test\", imgsz=imgsz)\n test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, pin_memory=True, shuffle=False)\n return test_dataloader\n# ----------------------------------------\n# Train model\n# ----------------------------------------\ndef train_model(model,",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "train_model",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def train_model(model,\n train_dataloader,\n val_dataloader,\n optimizer,\n num_epochs,\n criterion,\n device,\n use_best_weights,\n early_stopping,\n save_dir):",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "test_model",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def test_model(model, dataloader, criterion, device, num_classes):\n \"\"\"\n Test model and calculate confusion matrix\n \"\"\"\n model.to(device)\n model.eval()\n confusion_matrix = np.zeros((num_classes, num_classes))\n correct = 0\n total = 0\n loss = 0.0",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "calculate_metrics",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def calculate_metrics(confusion_matrix):\n \"\"\"\n \"\"\"\n num_classes = len(confusion_matrix)\n Recall_list = []\n Precision_list = []\n F1_list = []\n for i in range(num_classes):\n TP_cls_i = confusion_matrix[i,i]\n FP_cls_i = sum(confusion_matrix[i,:]) - TP_cls_i",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "save_metrics",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def save_metrics(metrics: Dict[str, Scalar], header, filename: str):\n \"\"\"\n Write results to a text file\n \"\"\"\n keys, vals = list(metrics.keys()), np.array(list(metrics.values())).T\n num_cols = len(metrics) + 1\n num_rows = vals.shape[0]\n header = ((\"%23s,\" * num_cols % tuple([header] + keys)).rstrip(\",\"))\n with open(filename, \"w\") as f:\n f.write(header + \"\\n\")",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "get_model",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def get_model(model_config: str, num_classes: int):\n \"\"\"\n \"\"\"\n if model_config == \"EfficientNet\":\n model = efficientnet_v2_s(weights=\"IMAGENET1K_V1\")\n in_features = model.classifier[-1].in_features\n model.classifier[-1] = torch.nn.Linear(in_features, num_classes)\n elif model_config == \"VGG\":\n model = vgg19(weights=\"IMAGENET1K_V1\")\n in_features = model.classifier[-1].in_features",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "get_strategy",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def get_strategy(strategy_config: str):\n \"\"\"\n \"\"\"\n if strategy_config == \"FedAvg\":\n strategy = FedAvg(fraction_fit=1.0,\n fraction_evaluate=0.0,\n min_fit_clients=CONFIG[\"num_clients\"],\n min_evaluate_clients=CONFIG[\"num_clients\"],\n min_available_clients=CONFIG[\"num_clients\"],\n on_fit_config_fn=fit_config,",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "get_optimizer",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def get_optimizer(model, optimizer_config: str):\n \"\"\"\n \"\"\"\n if optimizer_config == \"SGD\":\n optimizer = torch.optim.SGD(model.parameters(),\n lr=CONFIG[\"lr\"],\n momentum=CONFIG[\"momentum\"])\n elif optimizer_config == \"Adam\":\n optimizer = torch.optim.Adam(model.parameters(),\n lr=CONFIG[\"lr\"],",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "get_criterion",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def get_criterion(criterion_config: str):\n \"\"\"\n \"\"\"\n if criterion_config == \"CrossEntropyLoss\":\n criterion = nn.CrossEntropyLoss()\n return criterion\n# ------------------------------\n# Create folder\n# ------------------------------\ndef create_folder(dir: str):",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "create_folder",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def create_folder(dir: str):\n \"\"\"\n Check if folder already exists\n If not create new folder\n \"\"\"\n if not os.path.exists(dir):\n os.makedirs(dir)\n# ------------------------------\n# Save model as .pt\n# ------------------------------",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "save_model",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def save_model(model, save_dir):\n \"\"\"\n Export model to TorchScript and save it\n \"\"\"\n if not os.path.exists(save_dir):\n model_scripted = torch.jit.script(model)\n model_scripted.save(save_dir)\n# ------------------------------\n# Get parameters\n# ------------------------------",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "get_parameters",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def get_parameters(model) -> NDArrays:\n \"\"\"\n Return model parameters as a list of NumPy ndarrays\n \"\"\"\n return [val.cpu().numpy() for _, val in model.state_dict().items()]\n# ------------------------------\n# Set parameters\n# ------------------------------\ndef set_parameters(model, parameters: NDArrays) -> None:\n \"\"\"",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "set_parameters",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def set_parameters(model, parameters: NDArrays) -> None:\n \"\"\"\n Set model parameters from a list of NumPy ndarrays\n \"\"\"\n params_dict = zip(model.state_dict().keys(), parameters)\n state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})\n model.load_state_dict(state_dict, strict=True)\n return\n# ------------------------------\n# Implement Flower client",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "evaluate",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def evaluate(server_round: int,\n parameters: NDArrays,\n config: Dict[str, Scalar],\n ) -> Optional[Tuple[float, Dict[str, Scalar]]]:\n \"\"\"\n The evaluate function will be called by Flower after each server round\n \"\"\"\n # create folder\n save_dir = os.path.join(\"runs\", CONFIG[\"output_folder\"], \"global_model\")\n create_folder(save_dir)",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "client_fn",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def client_fn(cid: str) -> FlowerClient:\n \"\"\"\n Create a Flower client instance. Each client will\n be trained and validated on his own unique data\n \"\"\"\n # load pretrained model\n model = get_model(CONFIG[\"model\"], CONFIG[\"num_classes\"])\n # specify output folder\n root_dir = os.path.join(\"runs\", CONFIG[\"output_folder\"])\n save_dir = os.path.join(root_dir, f\"client_{int(cid)+1}\")",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "fit_config",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def fit_config(server_round: int) -> Dict[str, Scalar]:\n \"\"\"\n Return training configuration dict for each round\n \"\"\"\n config = {\n \"server_round\": server_round,\n \"epochs\": CONFIG[\"epochs\"],\n \"optimizer\": CONFIG[\"optimizer\"],\n \"momentum\": CONFIG[\"momentum\"],\n \"lr\": CONFIG[\"lr\"],",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "extract_metrics",
"kind": 2,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "def extract_metrics(metrics: Dict[str, Scalar]) -> Dict[str, Scalar]:\n \"\"\"\n \"\"\"\n extracted_metrics = {\n \"test_loss\": [],\n \"test_acc\": [],\n \"precision\": [],\n \"recall\": [],\n \"f1\": [],\n }",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "config_file_name",
"kind": 5,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "config_file_name = \"config.yaml\"\nwith open(config_file_name) as file:\n CONFIG = yaml.safe_load(file)\n# ------------------------------\n# Extract .zip folder\n# ------------------------------\ndef extract_zip(zip_file_name: str, save_dir: str):\n \"\"\"\n Extract .zip folder, if not already done\n \"\"\"",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "class_map_str_to_idx",
"kind": 5,
"importPath": "main_esaim",
"description": "main_esaim",
"peekOfCode": "class_map_str_to_idx = {\n \"No_windshield\": 0,\n \"With_windshield\": 1\n}\n# ----------------------------------------\n# Load data\n# ----------------------------------------\nclass Dataloader(Dataset):\n def __init__(self, root_dir, loader_mode, imgsz):\n self.root_dir = root_dir",
"detail": "main_esaim",
"documentation": {}
},
{
"label": "Dataloader",
"kind": 6,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "class Dataloader(Dataset):\n def __init__(self, root_dir, loader_mode, imgsz):\n self.root_dir = root_dir\n self.loader_mode = loader_mode\n if self.loader_mode == \"test\":\n self.imgs = [elem for elem in glob.glob(os.path.join(root_dir, \"*\", \"*\"))]\n else:\n self.imgs = [elem for elem in glob.glob(os.path.join(root_dir, self.loader_mode, \"*\", \"*\"))]\n self.imgs_and_lbls = []\n for path in self.imgs:",
"detail": "main_flta",
"documentation": {}
},
{
"label": "FlowerClient",
"kind": 6,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "class FlowerClient(fl.client.Client):\n \"\"\"\n Flower client using PyTorch\n \"\"\"\n def __init__(self, cid, model, train_dataloader, val_dataloader, device, root_dir, save_dir):\n self.cid = cid\n self.model = model\n self.train_dataloader = train_dataloader\n self.val_dataloader = val_dataloader\n self.device = device",
"detail": "main_flta",
"documentation": {}
},
{
"label": "extract_zip",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def extract_zip(zip_file_name: str, save_dir: str):\n \"\"\"\n Extract .zip folder, if not already done\n \"\"\"\n parts = zip_file_name.split(\".\")\n file_name = parts[0]\n if not os.path.isdir(file_name):\n with zipfile.ZipFile(zip_file_name, \"r\") as zip_ref:\n zip_ref.extractall(save_dir)\n print(f\"Folder '{zip_file_name}' was successfully extracted\")",
"detail": "main_flta",
"documentation": {}
},
{
"label": "set_seeds",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def set_seeds(seed):\n \"\"\"\n \"\"\"\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True",
"detail": "main_flta",
"documentation": {}
},
{
"label": "load_train_val_data",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def load_train_val_data(root_dir, batch_size, imgsz):\n \"\"\"\n \"\"\"\n train_dataset = Dataloader(root_dir=root_dir, loader_mode=\"train\", imgsz=imgsz)\n val_dataset = Dataloader(root_dir=root_dir, loader_mode=\"val\", imgsz=imgsz)\n train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, pin_memory=True, shuffle=True)\n val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, pin_memory=True, shuffle=False)\n return train_dataloader, val_dataloader\ndef load_test_data(root_dir, batch_size, imgsz):\n \"\"\"",
"detail": "main_flta",
"documentation": {}
},
{
"label": "load_test_data",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def load_test_data(root_dir, batch_size, imgsz):\n \"\"\"\n \"\"\"\n test_dataset = Dataloader(root_dir=root_dir, loader_mode=\"test\", imgsz=imgsz)\n test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, pin_memory=True, shuffle=False)\n return test_dataloader\n# ----------------------------------------\n# Train model\n# ----------------------------------------\ndef train_model(model,",
"detail": "main_flta",
"documentation": {}
},
{
"label": "train_model",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def train_model(model,\n train_dataloader,\n val_dataloader,\n optimizer,\n num_epochs,\n criterion,\n device,\n use_best_weights,\n early_stopping,\n save_dir=None):",
"detail": "main_flta",
"documentation": {}
},
{
"label": "test_model",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def test_model(model, dataloader, criterion, device, num_classes):\n \"\"\"\n Test model and calculate confusion matrix\n \"\"\"\n model.to(device)\n model.eval()\n confusion_matrix = np.zeros((num_classes, num_classes))\n correct = 0\n total = 0\n loss = 0.0",
"detail": "main_flta",
"documentation": {}
},
{
"label": "calculate_metrics",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def calculate_metrics(confusion_matrix):\n \"\"\"\n \"\"\"\n num_classes = len(confusion_matrix)\n Recall_list = []\n Precision_list = []\n F1_list = []\n for i in range(num_classes):\n TP_cls_i = confusion_matrix[i,i]\n FP_cls_i = sum(confusion_matrix[i,:]) - TP_cls_i",
"detail": "main_flta",
"documentation": {}
},
{
"label": "save_metrics",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def save_metrics(metrics: Dict[str, Scalar], header, filename: str):\n \"\"\"\n Write results to a text file\n \"\"\"\n keys, vals = list(metrics.keys()), np.array(list(metrics.values())).T\n num_cols = len(metrics) + 1\n num_rows = vals.shape[0]\n header = ((\"%23s,\" * num_cols % tuple([header] + keys)).rstrip(\",\"))\n with open(filename, \"w\") as f:\n f.write(header + \"\\n\")",
"detail": "main_flta",
"documentation": {}
},
{
"label": "get_model",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def get_model(model_config: str, num_classes: int):\n \"\"\"\n \"\"\"\n if model_config == \"EfficientNet\":\n model = efficientnet_v2_s(weights=\"IMAGENET1K_V1\")\n in_features = model.classifier[-1].in_features\n model.classifier[-1] = torch.nn.Linear(in_features, num_classes)\n elif model_config == \"VGG\":\n model = vgg19(weights=\"IMAGENET1K_V1\")\n in_features = model.classifier[-1].in_features",
"detail": "main_flta",
"documentation": {}
},
{
"label": "get_strategy",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def get_strategy(strategy_config: str):\n \"\"\"\n \"\"\"\n if strategy_config == \"FedAvg\":\n strategy = FedAvg(fraction_fit=1.0,\n fraction_evaluate=0.0,\n min_fit_clients=CONFIG[\"num_clients\"],\n min_evaluate_clients=CONFIG[\"num_clients\"],\n min_available_clients=CONFIG[\"num_clients\"],\n on_fit_config_fn=fit_config,",
"detail": "main_flta",
"documentation": {}
},
{
"label": "get_optimizer",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def get_optimizer(model, optimizer_config: str):\n \"\"\"\n \"\"\"\n if optimizer_config == \"SGD\":\n optimizer = torch.optim.SGD(model.parameters(),\n lr=CONFIG[\"lr\"],\n momentum=CONFIG[\"momentum\"])\n elif optimizer_config == \"Adam\":\n optimizer = torch.optim.Adam(model.parameters(),\n lr=CONFIG[\"lr\"],",
"detail": "main_flta",
"documentation": {}
},
{
"label": "get_criterion",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def get_criterion(criterion_config: str):\n \"\"\"\n \"\"\"\n if criterion_config == \"CrossEntropyLoss\":\n criterion = nn.CrossEntropyLoss()\n return criterion\n# ------------------------------\n# Create folder\n# ------------------------------\ndef create_folder(dir: str):",
"detail": "main_flta",
"documentation": {}
},
{
"label": "create_folder",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def create_folder(dir: str):\n \"\"\"\n Check if folder already exists\n If not create new folder\n \"\"\"\n if not os.path.exists(dir):\n os.makedirs(dir)\n# ------------------------------\n# Save model as .pt\n# ------------------------------",
"detail": "main_flta",
"documentation": {}
},
{
"label": "save_model",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def save_model(model, save_dir):\n \"\"\"\n Export model to TorchScript and save it\n \"\"\"\n if not os.path.exists(save_dir):\n model_scripted = torch.jit.script(model)\n model_scripted.save(save_dir)\n# ------------------------------\n# Get parameters\n# ------------------------------",
"detail": "main_flta",
"documentation": {}
},
{
"label": "get_parameters",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def get_parameters(model) -> NDArrays:\n \"\"\"\n Return model parameters as a list of NumPy ndarrays\n \"\"\"\n return [val.cpu().numpy() for _, val in model.state_dict().items()]\n# ------------------------------\n# Set parameters\n# ------------------------------\ndef set_parameters(model, parameters: NDArrays) -> None:\n \"\"\"",
"detail": "main_flta",
"documentation": {}
},
{
"label": "set_parameters",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def set_parameters(model, parameters: NDArrays) -> None:\n \"\"\"\n Set model parameters from a list of NumPy ndarrays\n \"\"\"\n params_dict = zip(model.state_dict().keys(), parameters)\n state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})\n model.load_state_dict(state_dict, strict=True)\n return\n# ------------------------------\n# Implement Flower client",
"detail": "main_flta",
"documentation": {}
},
{
"label": "evaluate",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def evaluate(server_round: int,\n parameters: NDArrays,\n config: Dict[str, Scalar],\n ) -> Optional[Tuple[float, Dict[str, Scalar]]]:\n \"\"\"\n The evaluate function will be called by Flower after each server round\n \"\"\"\n # create folder\n save_dir = os.path.join(\"runs\", CONFIG[\"output_folder\"], \"global_model\")\n create_folder(save_dir)",
"detail": "main_flta",
"documentation": {}
},
{
"label": "client_fn",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def client_fn(cid: str) -> FlowerClient:\n \"\"\"\n Create a Flower client instance. Each client will\n be trained and validated on his own unique data\n \"\"\"\n # load pretrained model\n model = get_model(CONFIG[\"model\"], CONFIG[\"num_classes\"])\n # specify output folder\n root_dir = os.path.join(\"runs\", CONFIG[\"output_folder\"])\n save_dir = os.path.join(root_dir, f\"client_{int(cid)+1}\")",
"detail": "main_flta",
"documentation": {}
},
{
"label": "fit_config",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def fit_config(server_round: int) -> Dict[str, Scalar]:\n \"\"\"\n Return training configuration dict for each round\n \"\"\"\n config = {\n \"server_round\": server_round,\n \"epochs\": CONFIG[\"epochs\"],\n \"optimizer\": CONFIG[\"optimizer\"],\n \"momentum\": CONFIG[\"momentum\"],\n \"lr\": CONFIG[\"lr\"],",
"detail": "main_flta",
"documentation": {}
},
{
"label": "extract_metrics",
"kind": 2,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "def extract_metrics(metrics: Dict[str, Scalar]) -> Dict[str, Scalar]:\n \"\"\"\n \"\"\"\n extracted_metrics = {\n \"test_loss\": [],\n \"test_acc\": [],\n \"precision\": [],\n \"recall\": [],\n \"f1\": [],\n }",
"detail": "main_flta",
"documentation": {}
},
{
"label": "config_file_name",
"kind": 5,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "config_file_name = \"config.yaml\"\nwith open(config_file_name) as file:\n CONFIG = yaml.safe_load(file)\n# ------------------------------\n# Extract .zip folder\n# ------------------------------\ndef extract_zip(zip_file_name: str, save_dir: str):\n \"\"\"\n Extract .zip folder, if not already done\n \"\"\"",
"detail": "main_flta",
"documentation": {}
},
{
"label": "class_map_str_to_idx",
"kind": 5,
"importPath": "main_flta",
"description": "main_flta",
"peekOfCode": "class_map_str_to_idx = {\n \"No_windshield\": 0,\n \"Windshield_TypeA\": 1,\n \"Windshield_TypeB\": 2,\n \"Windshield_TypeC\": 3,\n \"Windshield_TypeD\": 4\n}\n# ----------------------------------------\n# Load data\n# ----------------------------------------",
"detail": "main_flta",
"documentation": {}
},
{
"label": "PlainFedAvg",
"kind": 6,
"importPath": "strategy",
"description": "strategy",
"peekOfCode": "class PlainFedAvg(fl.server.strategy.FedAvg):\n def aggregate_fit(self,\n server_round: int,\n results: List[Tuple[ClientProxy, FitRes]],\n failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],\n ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:\n \"\"\"\n Aggregate fit results using plain average\n \"\"\"\n if not results:",
"detail": "strategy",
"documentation": {}
},
{
"label": "aggregate_plain_average",
"kind": 2,
"importPath": "strategy",
"description": "strategy",
"peekOfCode": "def aggregate_plain_average(results: List[Tuple[NDArrays, int]]) -> NDArrays:\n \"\"\"\n Compute plain average\n \"\"\"\n weights_list = [weights for weights, _ in results]\n # Compute average weights of each layer\n weights_prime: NDArrays = [\n reduce(np.add, layer_updates) / len(weights_list)\n for layer_updates in zip(*weights_list)\n ]",
"detail": "strategy",
"documentation": {}
},
{
"label": "read_config",
"kind": 2,
"importPath": "test_global_weight",
"description": "test_global_weight",
"peekOfCode": "def read_config(config_file):\n with open(config_file, 'r') as f:\n config = yaml.safe_load(f)\n return config\n# Function to set up model architecture with configurable last layers\ndef setup_model(num_classes, model_name):\n model_classes = {\n \"EfficientNet\": efficientnet_v2_s,\n \"VGG\": vgg19,\n \"ResNet\": resnet50,",
"detail": "test_global_weight",
"documentation": {}
},
{
"label": "setup_model",
"kind": 2,
"importPath": "test_global_weight",
"description": "test_global_weight",
"peekOfCode": "def setup_model(num_classes, model_name):\n model_classes = {\n \"EfficientNet\": efficientnet_v2_s,\n \"VGG\": vgg19,\n \"ResNet\": resnet50,\n \"InceptionNet\": inception_v3,\n \"DenseNet\": densenet121\n }\n model = model_classes[model_name](pretrained=False)\n if model_name in [\"VGG\", \"EfficientNet\"]:",
"detail": "test_global_weight",
"documentation": {}
},
{
"label": "load_weights",
"kind": 2,
"importPath": "test_global_weight",
"description": "test_global_weight",
"peekOfCode": "def load_weights(model, weights_path):\n loaded_model = torch.jit.load(weights_path) # Load the TorchScript model\n model.load_state_dict(loaded_model.state_dict()) # Copy the parameters from the loaded model to the provided model\n model.eval() # Set the model to evaluation mode\nclass_map_str_to_idx = {\n \"No_windshield\": 0,\n \"With_windshield\": 1\n}\n# Function to test the model\ndef test_model(model, test_loader):",
"detail": "test_global_weight",
"documentation": {}
},
{
"label": "test_model",
"kind": 2,
"importPath": "test_global_weight",
"description": "test_global_weight",
"peekOfCode": "def test_model(model, test_loader):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model.to(device)\n criterion = nn.CrossEntropyLoss()\n all_predictions = []\n all_targets = []\n all_confidences = [] # Store confidences\n with torch.no_grad(), tqdm(total=len(test_loader), desc=\"Running tests\") as pbar:\n for images, labels in test_loader:\n images, labels = images.to(device), labels.to(device)",
"detail": "test_global_weight",
"documentation": {}
},
{
"label": "save_results",
"kind": 2,
"importPath": "test_global_weight",
"description": "test_global_weight",
"peekOfCode": "def save_results(output_folder, all_predictions, all_targets, metrics, config, test_dataset, all_confidences):\n # Create output folder if it doesn't exist\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n # Create folders based on class mapping\n for class_name in class_map_str_to_idx.keys():\n class_folder = os.path.join(output_folder, class_name)\n os.makedirs(class_folder, exist_ok=True)\n # Move images to respective class folders based on predictions\n for image_path, prediction, target, confidence in zip(test_dataset.imgs, all_predictions, all_targets, all_confidences):",
"detail": "test_global_weight",
"documentation": {}
},
{
"label": "main",
"kind": 2,
"importPath": "test_global_weight",
"description": "test_global_weight",
"peekOfCode": "def main(config_file):\n # Read configuration\n config = read_config(config_file)\n # Set up output folder\n output_folder = config['output_folder']\n if os.path.exists(output_folder):\n print(\"Output folder already exists. Exiting.\")\n return\n # Set hyperparameters\n num_classes = config['num_classes']",
"detail": "test_global_weight",
"documentation": {}
},
{
"label": "class_map_str_to_idx",
"kind": 5,
"importPath": "test_global_weight",
"description": "test_global_weight",
"peekOfCode": "class_map_str_to_idx = {\n \"No_windshield\": 0,\n \"With_windshield\": 1\n}\n# Function to test the model\ndef test_model(model, test_loader):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model.to(device)\n criterion = nn.CrossEntropyLoss()\n all_predictions = []",
"detail": "test_global_weight",
"documentation": {}
},
{
"label": "config_file_name",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "config_file_name = \"config.yaml\"\nwith open(config_file_name) as file:\n CONFIG = yaml.safe_load(file)\n# specify output folder\nsave_dir = os.path.join(\"runs\", CONFIG[\"output_folder\"])\n# check if folder already exists\nif os.path.exists(save_dir):\n print(\"Output folder already exists. Exiting the program.\")\n sys.exit()\n# create folder",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "save_dir",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "save_dir = os.path.join(\"runs\", CONFIG[\"output_folder\"])\n# check if folder already exists\nif os.path.exists(save_dir):\n print(\"Output folder already exists. Exiting the program.\")\n sys.exit()\n# create folder\ncreate_folder(save_dir)\nprint(\"Output folder created.\")\n# move copy of config file to output folder\nshutil.copy(config_file_name, os.path.join(save_dir, config_file_name))",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "cid",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "cid = CONFIG[\"client_id\"]\n# set seeds\nset_seeds(CONFIG[\"seed\"])\nprint(\"Seeds set.\")\n# load pretrained model\nmodel = get_model(CONFIG[\"model\"], CONFIG[\"num_classes\"])\nprint(\"Pretrained model loaded.\")\n# load data\nprint(\"Loading training and validation data...\")\ntrain_dataloader, val_dataloader = load_train_val_data(",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "model",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "model = get_model(CONFIG[\"model\"], CONFIG[\"num_classes\"])\nprint(\"Pretrained model loaded.\")\n# load data\nprint(\"Loading training and validation data...\")\ntrain_dataloader, val_dataloader = load_train_val_data(\n os.path.join(\"dataset/ESAIM\", f\"Client{int(cid)}\"),\n CONFIG[\"batch_size\"],\n CONFIG[\"img_size\"],\n)\nprint(\"Training and validation data loaded.\")",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "test_dataloader",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "test_dataloader = load_test_data(\n \"dataset/ESAIM/global_test_set\",\n CONFIG[\"batch_size\"],\n CONFIG[\"img_size\"],\n)\nprint(\"Test data loaded.\")\n# save one batch of training data\nimgs, _ = next(iter(train_dataloader))\ntrain_batch0 = make_grid(imgs, nrow=4, scale_each=True)\nsave_image(train_batch0, os.path.join(save_dir, \"train_batch0\"), \"png\")",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "train_batch0",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "train_batch0 = make_grid(imgs, nrow=4, scale_each=True)\nsave_image(train_batch0, os.path.join(save_dir, \"train_batch0\"), \"png\")\nprint(\"One batch of training data saved as image.\")\n# specify optimizer\noptimizer = get_optimizer(model, CONFIG[\"optimizer\"])\nprint(\"Optimizer specified.\")\n# specify loss function\ncriterion = get_criterion(CONFIG[\"loss_fcn\"])\nprint(\"Loss function specified.\")\n# specify device",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "optimizer",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "optimizer = get_optimizer(model, CONFIG[\"optimizer\"])\nprint(\"Optimizer specified.\")\n# specify loss function\ncriterion = get_criterion(CONFIG[\"loss_fcn\"])\nprint(\"Loss function specified.\")\n# specify device\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"Device in use: {device}\")\n# train model\nprint(\"Training the model...\")",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "criterion",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "criterion = get_criterion(CONFIG[\"loss_fcn\"])\nprint(\"Loss function specified.\")\n# specify device\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"Device in use: {device}\")\n# train model\nprint(\"Training the model...\")\nmetrics_dict = train_model(\n model,\n train_dataloader,",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "device",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"Device in use: {device}\")\n# train model\nprint(\"Training the model...\")\nmetrics_dict = train_model(\n model,\n train_dataloader,\n val_dataloader,\n optimizer,\n CONFIG[\"epochs\"],",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "metrics_dict",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "metrics_dict = train_model(\n model,\n train_dataloader,\n val_dataloader,\n optimizer,\n CONFIG[\"epochs\"],\n criterion,\n device,\n CONFIG[\"use_best_weights\"],\n CONFIG[\"early_stopping\"],",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "best_epoch_weights_path",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "best_epoch_weights_path = os.path.join(save_dir, \"best.pt\")\nif os.path.exists(best_epoch_weights_path):\n print(\"Loading best epoch weights...\")\n model.load_state_dict(torch.jit.load(best_epoch_weights_path).state_dict())\n print(\"Best epoch weights loaded successfully.\")\nelse:\n print(\"Best epoch weights not found. Using final weights for testing.\")\n# test model\nprint(\"Testing the model...\")\ntest_acc, test_loss, confusion_matrix = test_model(",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "num_epochs_completed",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "num_epochs_completed = len(metrics_dict.get(\"train_loss\", []))\n# Add test metrics to dict and pad the lists\nmetrics_dict[\"test_acc\"] = [0] * (num_epochs_completed - 1) + [test_acc]\nmetrics_dict[\"test_loss\"] = [0] * (num_epochs_completed - 1) + [test_loss]\nmetrics_dict[\"precision\"] = [0] * (num_epochs_completed - 1) + [average_Precision]\nmetrics_dict[\"recall\"] = [0] * (num_epochs_completed - 1) + [average_Recall]\nmetrics_dict[\"f1\"] = [0] * (num_epochs_completed - 1) + [average_F1]\n# write training and validation metrics to .txt file\nprint(\"Saving metrics to a file...\")\nsave_metrics(metrics_dict, \"epoch\", os.path.join(save_dir, \"results.txt\"))",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "metrics_dict[\"test_acc\"]",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "metrics_dict[\"test_acc\"] = [0] * (num_epochs_completed - 1) + [test_acc]\nmetrics_dict[\"test_loss\"] = [0] * (num_epochs_completed - 1) + [test_loss]\nmetrics_dict[\"precision\"] = [0] * (num_epochs_completed - 1) + [average_Precision]\nmetrics_dict[\"recall\"] = [0] * (num_epochs_completed - 1) + [average_Recall]\nmetrics_dict[\"f1\"] = [0] * (num_epochs_completed - 1) + [average_F1]\n# write training and validation metrics to .txt file\nprint(\"Saving metrics to a file...\")\nsave_metrics(metrics_dict, \"epoch\", os.path.join(save_dir, \"results.txt\"))\nprint(\"Metrics saved to results.txt in the output folder.\")\nprint(\"\\nResults are stored in:\", save_dir)",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "metrics_dict[\"test_loss\"]",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "metrics_dict[\"test_loss\"] = [0] * (num_epochs_completed - 1) + [test_loss]\nmetrics_dict[\"precision\"] = [0] * (num_epochs_completed - 1) + [average_Precision]\nmetrics_dict[\"recall\"] = [0] * (num_epochs_completed - 1) + [average_Recall]\nmetrics_dict[\"f1\"] = [0] * (num_epochs_completed - 1) + [average_F1]\n# write training and validation metrics to .txt file\nprint(\"Saving metrics to a file...\")\nsave_metrics(metrics_dict, \"epoch\", os.path.join(save_dir, \"results.txt\"))\nprint(\"Metrics saved to results.txt in the output folder.\")\nprint(\"\\nResults are stored in:\", save_dir)",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "metrics_dict[\"precision\"]",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "metrics_dict[\"precision\"] = [0] * (num_epochs_completed - 1) + [average_Precision]\nmetrics_dict[\"recall\"] = [0] * (num_epochs_completed - 1) + [average_Recall]\nmetrics_dict[\"f1\"] = [0] * (num_epochs_completed - 1) + [average_F1]\n# write training and validation metrics to .txt file\nprint(\"Saving metrics to a file...\")\nsave_metrics(metrics_dict, \"epoch\", os.path.join(save_dir, \"results.txt\"))\nprint(\"Metrics saved to results.txt in the output folder.\")\nprint(\"\\nResults are stored in:\", save_dir)",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "metrics_dict[\"recall\"]",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "metrics_dict[\"recall\"] = [0] * (num_epochs_completed - 1) + [average_Recall]\nmetrics_dict[\"f1\"] = [0] * (num_epochs_completed - 1) + [average_F1]\n# write training and validation metrics to .txt file\nprint(\"Saving metrics to a file...\")\nsave_metrics(metrics_dict, \"epoch\", os.path.join(save_dir, \"results.txt\"))\nprint(\"Metrics saved to results.txt in the output folder.\")\nprint(\"\\nResults are stored in:\", save_dir)",
"detail": "train_and_test",
"documentation": {}
},
{
"label": "metrics_dict[\"f1\"]",
"kind": 5,
"importPath": "train_and_test",
"description": "train_and_test",
"peekOfCode": "metrics_dict[\"f1\"] = [0] * (num_epochs_completed - 1) + [average_F1]\n# write training and validation metrics to .txt file\nprint(\"Saving metrics to a file...\")\nsave_metrics(metrics_dict, \"epoch\", os.path.join(save_dir, \"results.txt\"))\nprint(\"Metrics saved to results.txt in the output folder.\")\nprint(\"\\nResults are stored in:\", save_dir)",
"detail": "train_and_test",
"documentation": {}
}
]
\ No newline at end of file
<!DOCTYPE html>
<!-- Minimal static page that renders a Mermaid flowchart client-side.
     Intended to be served as-is (e.g. via GitLab Pages); no build step. -->
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Mermaid Flowchart</title>
    <!-- Load Mermaid v10 as an ES module straight from the jsDelivr CDN
         (requires network access at view time). With startOnLoad: true,
         Mermaid scans the page after load and renders every element that
         has class="mermaid" into an inline SVG. -->
    <script type="module">
        import mermaid from 'https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.esm.min.mjs';
        mermaid.initialize({ startOnLoad: true });
    </script>
</head>
<body>
    <!-- Diagram source in Mermaid syntax: a top-down (TD) flowchart of
         four linked nodes. The text below is replaced by the rendered SVG. -->
    <div class="mermaid">
        graph TD;
        A[Start] --> B[Process 1];
        B --> C[Process 2];
        C --> D[End];
    </div>
</body>
</html>
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment