Spell Check 🥲
AidinHamedi committed Oct 10, 2024
1 parent 714c122 commit f3ba6fe
Showing 2 changed files with 33 additions and 10 deletions.
23 changes: 22 additions & 1 deletion .vscode/settings.json
@@ -1,3 +1,24 @@
 {
-    "sarif-viewer.connectToGithubCodeScanning": "off"
+    "sarif-viewer.connectToGithubCodeScanning": "off",
+    "cSpell.words": [
+        "amsbound",
+        "annot",
+        "cmap",
+        "Efficientnet",
+        "ISCAM",
+        "nesterov",
+        "nrow",
+        "Onecycle",
+        "optim",
+        "SSCAM",
+        "subkey",
+        "subvalue",
+        "tablefmt",
+        "unscale",
+        "vutils",
+        "xlabel",
+        "xticklabels",
+        "ylabel",
+        "yticklabels"
+    ],
 }
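
For context: `cSpell.words` is the workspace dictionary read by the Code Spell Checker (cspell) extension, so project-specific identifiers such as `nesterov`, `optim`, and `vutils` stop being flagged anywhere in the repository. The notebook change below uses the complementary per-file form, an inline directive in a comment. A minimal sketch of how the inline directives behave in Python (the variable name is made up for illustration):

    # spell-checker: disable
    prametrs = {"lr": 0.01}  # deliberately misspelled; not flagged while disabled
    # spell-checker: enable
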
20 changes: 11 additions & 9 deletions Main.ipynb
@@ -33,6 +33,7 @@
 }
 ],
 "source": [
+"# /* spell-checker: disable */\n",
 "# Error Suppressing >>>\n",
 "import warnings\n",
 "\n",
@@ -80,6 +81,7 @@
 " LayerCAM,\n",
 ")\n",
 "\n",
+"\n",
 "# Utils >>>\n",
 "from Utils.Record_Var import Record_var\n",
 "from Utils.print_color import print_colored as cprint\n",
@@ -250,8 +252,8 @@
 " img_size=img_res,\n",
 " color_mode=img_format,\n",
 ")\n",
-"# Peprocessing data >>>\n",
-"cprint(\"<Fore.YELLOW>Peprocessing the data...\")\n",
+"# Preprocessing data >>>\n",
+"cprint(\"<Fore.YELLOW>Preprocessing the data...\")\n",
 "# One hot encode the labels\n",
 "cprint(\n",
 " f\" │ <Fore.CYAN>One hot encoding the labels... <Fore.GREEN>[num_classes: {num_classes}]\"\n",
@@ -5833,11 +5835,11 @@
 "dynamic_agmentation_scaling = True # if True, the magnitude will be scaled with the epoch\n",
 "dynamic_agmentation_scaling_fn = lambda epoch: min(epoch / (98 / 1), 8) # output: magnitude (1-30) | input: epoch\n",
 "early_stop_patience = 86 # After n epochs without improvement, stop training and load the best model\n",
-"train_eval_portion = 0.14 # The proportion of the training data that it used to messure the model's performance on train data\n",
-"gradient_clipping = True # If True, the gradient will be clipped to a maximum value/norm (Pervent exploding gradients)\n",
+"train_eval_portion = 0.14 # The proportion of the training data that it used to measure the model's performance on train data\n",
+"gradient_clipping = True # If True, the gradient will be clipped to a maximum value/norm (Prevent exploding gradients)\n",
 "gradient_clipping_max = 1.2 # The maximum of the gradient\n",
 "gradient_clipping_method = \"Norm\" # \"Norm\" or \"Value\"\n",
-"agmentation_method = \"runtime\" # \"runtime\" (Stable-LowRamUsage) Recomended or \"Pre_epoch\" (Unstable-HighRamUsage) Deparicated!\n",
+"agmentation_method = \"runtime\" # \"runtime\" (Stable-LowRamUsage) Recommended or \"Pre_epoch\" (Unstable-HighRamUsage) Deprecated!\n",
 "gradient_accumulation = False # If True, the gradient will be accumulated over multiple batches before being applied to the model. (To save memory if you need to use a large batch size)\n",
 "gradient_accumulation_steps = 2 # The number of batches to accumulate the gradient over\n",
 "exponential_moving_average = True # If True, the model will be averaged over the last n epochs\n",
@@ -5935,7 +5937,7 @@
 "# Prep\n",
 "torch.cuda.empty_cache()\n",
 "gc.collect()\n",
-"cprint(\"<Fore.YELLOW>Starting the traning engine...\")\n",
+"cprint(\"<Fore.YELLOW>Starting the training engine...\")\n",
 "History = {\"Train\": [], \"Val\": []}\n",
 "Cache_dict = {}\n",
 "if not Is_tensor:\n",
@@ -6006,7 +6008,7 @@
 "cprint(\" │ <Fore.CYAN>Adding tensorboard model graph...\")\n",
 "TB_Data_writer.add_graph(model, torch.rand(1, x_test.shape[1], *img_res).to(device))\n",
 "train_reporter.Add_Prams(Train_Confs)\n",
-"# Traing start msg\n",
+"# Training start msg\n",
 "cprint(\"<Fore.YELLOW>Starting the training...\")\n",
 "# Main training loop\n",
 "try:\n",
@@ -6114,7 +6116,7 @@
 " model.train()\n",
 " # Moving loss to `device`\n",
 " loss_fn = loss_fn.to(device)\n",
-" # Tarin Verbose Prep\n",
+" # Train Verbose Prep\n",
 " cprint(\n",
 " f\" │ <Fore.CYAN>Train Eval History len: <Fore.GREEN>{TrainEval_Data_len}/{Train_total_batches}\"\n",
 " )\n",
@@ -6200,7 +6202,7 @@
 " progress_bar.update()\n",
 " # Close progress bar\n",
 " progress_bar.close()\n",
-" # Lerning rate scheduler step + moving the loss to cpu + Weight Averaging bn update\n",
+" # Learning rate scheduler step + moving the loss to cpu + Weight Averaging bn update\n",
 " loss_fn = loss_fn.cpu()\n",
 " if lr_scheduler_update_method == \"Epoch\":\n",
 " lr_scheduler.step()\n",
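
The `exponential_moving_average` flag and the "Weight Averaging bn update" comment in the last hunk match the pattern PyTorch ships in `torch.optim.swa_utils`: keep an averaged copy of the weights, then recompute BatchNorm running statistics for it before evaluation. A minimal sketch under that assumption (the decay value, model, and loader are illustrative, not taken from the notebook):

    import torch
    from torch.optim.swa_utils import AveragedModel, update_bn

    model = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.BatchNorm1d(8))
    decay = 0.999  # illustrative EMA decay
    # EMA update rule: avg = decay * avg + (1 - decay) * current
    ema_model = AveragedModel(
        model, avg_fn=lambda avg, cur, n: decay * avg + (1.0 - decay) * cur
    )

    ema_model.update_parameters(model)  # call after every optimizer step

    # BatchNorm running stats are not averaged, so refresh them on training
    # data before evaluating ema_model (the "bn update" in the hunk above):
    loader = [(torch.randn(8, 4),)]  # stand-in for the real train loader
    update_bn(loader, ema_model)
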
