From a188465402c3c1621065bd0e77abb299a0e61b24 Mon Sep 17 00:00:00 2001
From: Zhao-Penghai
Date: Wed, 25 Dec 2024 10:12:00 +0800
Subject: [PATCH] 241225

---
 README.md        | 2 +-
 offcial_train.py | 3 +--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 49285e5..e01fdd7 100644
--- a/README.md
+++ b/README.md
@@ -69,7 +69,7 @@ Finally, type `sh train.sh` in the console. Wating for the training ends~
 ## Testing (to reproduce, optional)
 Similar to fine-tuning, prepare `test.sh` as below:
 ```
-python inference.py \
+python official_test.py \
 --data_path ScImpactPredict/NAID/NAID_test_extrainfo.csv \
 --weight_dir path_to_runs_dir
 ```
diff --git a/offcial_train.py b/offcial_train.py
index 5450f05..469a762 100644
--- a/offcial_train.py
+++ b/offcial_train.py
@@ -101,7 +101,7 @@ def main(args):
     train_size = int(0.9 * total_size)
     val_size = total_size - train_size
     train_dataset, val_dataset = random_split(total_dataset, [train_size, val_size])
-    test_dataset = TextDataset(df_test, tokenizer, args.max_length)
+    test_dataset = TextDataset(df_test, tokenizer, args.max_length) # DO NOT USE FOR PARAMETER SEARCHING
 
     # Prepare Accelerator
     accelerator = Accelerator()
@@ -171,7 +171,6 @@ def get_args():
                         help='Directory for storing TensorBoard logs and model checkpoints')
 
     # Dataset and training configuration
-    parser.add_argument('--batch_size', type=int, default=16, help='Batch size for training and validation')
     parser.add_argument('--total_epochs', type=int, default=5, help='Total number of epochs to train')
     parser.add_argument('--base_lr', type=float, default=5e-5, help='Base learning rate for the optimizer')
     parser.add_argument('--learning_rate', type=float, default=1e-4, help='Learning rate for the optimizer')
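
The `DO NOT USE FOR PARAMETER SEARCHING` comment added to `offcial_train.py` marks a split-discipline rule: hyperparameters should be compared on the validation split that `random_split` holds out, and `test_dataset` should be evaluated only once, for the final report; otherwise test information leaks into tuning. Below is a minimal, hypothetical sketch of that separation, not code from this repository: the variable names mirror the hunk, while synthetic `TensorDataset` objects stand in for the repository's `TextDataset(df, tokenizer, max_length)`.
```
# Hypothetical sketch of the split discipline behind the new comment;
# variable names mirror offcial_train.py, data is synthetic.
import torch
from torch.utils.data import TensorDataset, random_split

total_dataset = TensorDataset(torch.randn(100, 8))
test_dataset = TensorDataset(torch.randn(20, 8))  # DO NOT USE FOR PARAMETER SEARCHING

# Same 90/10 split as in main(args): tune on val_dataset, never on test_dataset.
total_size = len(total_dataset)
train_size = int(0.9 * total_size)
val_size = total_size - train_size
train_dataset, val_dataset = random_split(total_dataset, [train_size, val_size])

# Hyperparameters (e.g. base_lr vs. learning_rate) are compared on val_dataset
# only; test_dataset is read a single time, after the search has finished.
print(len(train_dataset), len(val_dataset), len(test_dataset))  # 90 10 20
```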