remove stray breakpoint, fix mypy warning
This commit is contained in:
parent 68f748e99e
commit ce2ada2617
2 changed files with 1 addition and 2 deletions
@@ -92,5 +92,5 @@ class TrainingArguments():
     max_instant_params: int = field(default=0, metadata={"help": "Maximum amount of parameters to optimize per step in millions"})
     churn_percent: int = field(default=100, metadata={"help": "The percentage of active parameters to replace when changing active parameters"})
     eval_steps: int = field(default=-1, metadata={"help": "Number of optimization steps after which to compute the evaluation loss"})
-    eval_prompt: str = field(default=None, metadata={"help": "A prompt to use during eval to check if the model is learning"})
+    eval_prompt: str | None = field(default=None, metadata={"help": "A prompt to use during eval to check if the model is learning"})
     reshufle_steps: int = field(default=50, metadata={"help": "Number of steps to take before changing the active parameters"})
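The mypy warning named in the commit title comes from the eval_prompt annotation: under mypy's no-implicit-optional checking (the default in recent mypy releases), a field annotated as plain str may not take a None default, so the annotation is widened to str | None. A minimal sketch of the failing and fixed variants, assuming Python 3.10+ for the str | None syntax; the class below is an illustrative stand-in, not the project's real TrainingArguments:

from dataclasses import dataclass, field

@dataclass
class SketchArguments:
    # Rejected by mypy: the None default is incompatible with the declared type str.
    # eval_prompt: str = field(default=None)

    # Accepted: the annotation explicitly allows None.
    eval_prompt: str | None = field(
        default=None,
        metadata={"help": "A prompt to use during eval to check if the model is learning"},
    )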
@@ -218,7 +218,6 @@ def create_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args: D
             eval_dataset = dataset['eval']
         else:
             print('Splitting train dataset in train and validation according to `eval_dataset_size`')
-            breakpoint()
             dataset = dataset['train'].train_test_split(
                 test_size=data_args.eval_dataset_size, shuffle=True, seed=42
             )
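For context on the second hunk: breakpoint() is Python's built-in hook into the pdb debugger, so leaving it in would stall any non-interactive training run, while the surrounding train_test_split call comes from the Hugging Face datasets library and returns a dict-like object with 'train' and 'test' splits. A minimal sketch of that split, using toy data that is purely illustrative and not taken from the repository:

from datasets import Dataset

# Toy dataset standing in for the project's training data (illustrative only).
dataset = Dataset.from_dict({"text": [f"example {i}" for i in range(10)]})

# Deterministic split mirroring the call in the diff above:
# 20% of the rows go to the evaluation split.
splits = dataset.train_test_split(test_size=0.2, shuffle=True, seed=42)
train_dataset = splits["train"]  # 8 rows
eval_dataset = splits["test"]    # 2 rows

# A stray breakpoint() before this call would pause the process in pdb,
# which is why the commit removes it.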