From ce2ada2617b47810e31a331ea26bd7d49e36ce44 Mon Sep 17 00:00:00 2001 From: Carl Philipp Klemm Date: Tue, 7 May 2024 15:05:22 +0200 Subject: [PATCH] remove stray breakpoint, fix mypy warning --- arguments.py | 2 +- datamodules.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/arguments.py b/arguments.py index ef13b1b..6f645a5 100644 --- a/arguments.py +++ b/arguments.py @@ -92,5 +92,5 @@ class TrainingArguments(): max_instant_params: int = field(default=0, metadata={"help": "Maximum amount of paramters to optimize per step in millions"}) churn_percent: int = field(default=100, metadata={"help": "The percentage of active parameters to replace when changeing active parameters"}) eval_steps: int = field(default=-1, metadata={"help": "Number of optimization steps after wich to compute the evaluation loss"}) - eval_prompt: str = field(default=None, metadata={"help": "A prompt to used during eval to check if the model is learning"}) + eval_prompt: str | None = field(default=None, metadata={"help": "A prompt to use during eval to check if the model is learning"}) reshufle_steps: int = field(default=50, metadata={"help": "Number of steps to take before changing the active parameters"}) diff --git a/datamodules.py b/datamodules.py index 0e36a6d..e7ac922 100644 --- a/datamodules.py +++ b/datamodules.py @@ -218,7 +218,6 @@ def create_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args: D eval_dataset = dataset['eval'] else: print('Splitting train dataset in train and validation according to `eval_dataset_size`') - breakpoint() dataset = dataset['train'].train_test_split( test_size=data_args.eval_dataset_size, shuffle=True, seed=42 )