diff --git a/arguments.py b/arguments.py
index ef13b1b..48b3f40 100644
--- a/arguments.py
+++ b/arguments.py
@@ -1,11 +1,72 @@
+
+# QRotaryTraining - A novel method for fully training all parameters of large
+# language models (llms) while using less device memory than traditional methods.
+# Copyright (C) 2024 Carl Philipp Klemm
+#
+# This file is part of QRotaryTraining.
+#
+# QRotaryTraining is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# QRotaryTraining is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with QRotaryTraining. If not, see <https://www.gnu.org/licenses/>.
+
 from dataclasses import dataclass, field
-from typing import Optional
+from typing import Optional, Self
+from enum import Enum
+
+
+class DatasetType(Enum):
+    TEXT = 1
+    S2S = 2
+    HUB = 3
+    CHAT = 4
+
+    @staticmethod
+    def to_string(dtype: Self) -> str:
+        if dtype == DatasetType.TEXT:
+            return "text"
+        elif dtype == DatasetType.S2S:
+            return "s2s"
+        elif dtype == DatasetType.HUB:
+            return "hub"
+        elif dtype == DatasetType.CHAT:
+            return "chat"
+        return "invalid"
+
+    @staticmethod
+    def from_string(string: str):
+        if string == str(DatasetType.TEXT):
+            return DatasetType.TEXT
+        elif string == str(DatasetType.S2S):
+            return DatasetType.S2S
+        elif string == str(DatasetType.HUB):
+            return DatasetType.HUB
+        elif string == str(DatasetType.CHAT):
+            return DatasetType.CHAT
+        return None
+
+    def __str__(self):
+        return DatasetType.to_string(self)


 @dataclass
 class DataArguments:
     dataset: str = field(
-        metadata={"help": "A json file (s2s) or text file with the dataset to train on"}
+        metadata={"help": "The dataset to train on"}
+    )
+    dataset_type: str = field(
+        default="text", metadata={"help": f"The type of dataset, set to one of {[str(e) for e in DatasetType]}"}
+    )
+    dataset_chat_template: str | None = field(
+        default=None, metadata={"help": "Overrides the tokenizer's chat template with the one given here"}
     )
     eval_dataset_size: int = field(
         default=512, metadata={"help": "Size of validation dataset."}
@@ -26,10 +87,6 @@ class DataArguments:
         default=False, metadata={"help": "If this is set the dataset is assumed to be a name of a hf-hub dataset"}
     )
-    block_size: int = field(
-        default=512,
-        metadata={"help": "size of the blocks the text is split into for training"},
-    )


 @dataclass
@@ -65,8 +122,9 @@ class TrainingArguments():
     )
     resume: bool = field(default=False, metadata={"help": 'Resume from previous checkpoint'})
     ddp_find_unused_parameters: bool = field(default=True, metadata={"help": 'Set if the trainer should try to find unused parameters'})
-    output_dir: str = field(default='./output', metadata={"help": 'The output dir for logs and checkpoints'})
+    output_dir: str = field(default='./output', metadata={"help": 'The output dir for checkpoints'})
     per_device_train_batch_size: int = field(default=1, metadata={"help": 'The training batch size per GPU. Increase for better speed.'})
+    per_device_eval_batch_size: int = field(default=1, metadata={"help": 'The eval batch size per GPU. Increase for better speed.'})
     gradient_accumulation_steps: int = field(default=16, metadata={"help": 'How many gradients to accumulate before performing an optimizer step'})
     epochs: int = field(default=3, metadata={"help": 'How many epochs to train for'})
     weight_decay: float = field(default=0.0, metadata={"help": 'The L2 weight decay rate of AdamW'})
@@ -82,6 +140,7 @@ class TrainingArguments():
                                      metadata={"help": 'Learning rate schedule. Constant is a bit better than cosine, and has an advantage for analysis'})
     warmup_steps: float = field(default=0, metadata={"help": 'Number of steps to do a warmup for'})
     logging_steps: int = field(default=10, metadata={"help": 'The frequency of update steps after which to log the loss'})
+    logging_dir: str = field(default='./log', metadata={"help": 'The output dir for logs'})
     group_by_length: bool = field(default=False, metadata={"help": 'Group sequences into batches with same length. Saves memory and speeds up training considerably.'})
     save_steps: int = field(default=250, metadata={"help": 'How often to save a model'})
@@ -92,5 +151,5 @@ class TrainingArguments():
     max_instant_params: int = field(default=0, metadata={"help": "Maximum amount of parameters to optimize per step, in millions"})
     churn_percent: int = field(default=100, metadata={"help": "The percentage of active parameters to replace when changing active parameters"})
     eval_steps: int = field(default=-1, metadata={"help": "Number of optimization steps after which to compute the evaluation loss"})
-    eval_prompt: str = field(default=None, metadata={"help": "A prompt to used during eval to check if the model is learning"})
+    eval_prompt: str | None = field(default=None, metadata={"help": "A prompt to use during eval to check if the model is learning"})
     reshufle_steps: int = field(default=50, metadata={"help": "Number of steps to take before changing the active parameters"})
diff --git a/datamodules.py b/datamodules.py
index 0e36a6d..05b9ff2 100644
--- a/datamodules.py
+++ b/datamodules.py
@@ -1,27 +1,49 @@
+
+# QRotaryTraining - A novel method for fully training all parameters of large
+# language models (llms) while using less device memory than traditional methods.
+# Copyright (C) 2024 Carl Philipp Klemm
+#
+# This file is part of QRotaryTraining.
+#
+# QRotaryTraining is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# QRotaryTraining is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with QRotaryTraining. If not, see <https://www.gnu.org/licenses/>.
+
 import copy
 import torch
 import typing
 import datasets
 import itertools
 import transformers
+import os
 from dataclasses import dataclass
 from torch.nn.utils.rnn import pad_sequence
+from tqdm import tqdm

-from arguments import DataArguments
+from arguments import DataArguments, DatasetType

 IGNORE_INDEX = -100


-def group_texts(examples, block_size: int):
+def group_texts(examples, source_max_len: int):
     # Concatenate all texts.
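+    # For example, with source_max_len = 4 the tokenized rows [[1, 2], [3, 4, 5, 6]]
+    # are concatenated to [1, 2, 3, 4, 5, 6] and re-split into [[1, 2, 3, 4]]; the
+    # remainder [5, 6] is shorter than source_max_len and is dropped below.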
     concatenated_examples = {k: list(itertools.chain(*examples[k])) for k in examples.keys()}
     total_length = len(concatenated_examples[list(examples.keys())[0]])
     # We drop the small remainder; we could instead add padding if the model supported it. You can
     # customize this part to your needs.
-    if total_length >= block_size:
-        total_length = (total_length // block_size) * block_size
+    if total_length >= source_max_len:
+        total_length = (total_length // source_max_len) * source_max_len
     # Split by chunks of max_len.
-    result = {k: [t[i: i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items()}
+    result = {k: [t[i: i + source_max_len] for i in range(0, total_length, source_max_len)] for k, t in concatenated_examples.items()}
     result["labels"] = result["input_ids"].copy()
     return result
@@ -135,7 +157,7 @@ def create_data_module_s2s(tokenizer: transformers.PreTrainedTokenizer, data_arg
         eval_dataset = dataset['eval']
     else:
         print('Splitting train dataset into train and validation according to `eval_dataset_size`')
-        dataset = dataset.train_test_split(
+        dataset = dataset['train'].train_test_split(
             test_size=data_args.eval_dataset_size, shuffle=True, seed=42
         )
         eval_dataset = dataset['test']
@@ -175,7 +197,7 @@ def create_data_module_hub(tokenizer: transformers.PreTrainedTokenizer, data_arg
         eval_dataset = dataset['eval']
     else:
         print('Splitting train dataset into train and validation according to `eval_dataset_size`')
-        dataset = dataset.train_test_split(
+        dataset = dataset['train'].train_test_split(
             test_size=data_args.eval_dataset_size, shuffle=True, seed=42
         )
         eval_dataset = dataset['test']
@@ -198,14 +220,15 @@ def create_data_module_hub(tokenizer: transformers.PreTrainedTokenizer, data_arg
     )


-def create_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args: DataArguments, do_train: bool, do_eval: bool, do_predict: bool) -> typing.Dict:
+def create_data_module_txt(tokenizer: transformers.PreTrainedTokenizer,
+                           data_args: DataArguments, do_train: bool, do_eval: bool, do_predict: bool) -> typing.Dict:
     try:
         dataset = datasets.load_dataset('text', data_files={'train': [data_args.dataset]})
     except FileNotFoundError as ex:
         raise ValueError(f"Error loading dataset from {data_args.dataset}, {ex}")

-    if data_args.block_size > tokenizer.model_max_length:
-        raise ValueError(f"Block size of {data_args.block_size} is larger than the maximum size supported by the model: {tokenizer.model_max_length}")
+    if data_args.source_max_len > tokenizer.model_max_length:
+        raise ValueError(f"Max source length of {data_args.source_max_len} is larger than the maximum size supported by the model: {tokenizer.model_max_length}")

     def add_newline_fn(example):
         example['text'] = example['text'] + '\n'
@@ -218,10 +241,7 @@ def create_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args: D
         eval_dataset = dataset['eval']
     else:
         print('Splitting train dataset into train and validation according to `eval_dataset_size`')
-        breakpoint()
-        dataset = dataset['train'].train_test_split(
-            test_size=data_args.eval_dataset_size, shuffle=True, seed=42
-        )
+        dataset = dataset['train'].train_test_split(test_size=data_args.eval_dataset_size, shuffle=False)
         eval_dataset = dataset['test']

     if 'train' in dataset:
@@ -233,14 +253,14 @@ def create_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args: D
         lambda example: tokenizer(example['text']),
         batched=True,
         remove_columns='text',
-        num_proc=32,
+        num_proc=os.cpu_count(),
         load_from_cache_file=True)
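+    # Note on the worker counts below: num_proc is capped at roughly one process
+    # per 10 * source_max_len samples, so that small datasets do not spend more
+    # time spawning workers than they gain from parallel grouping.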
     train_dataset_tokenized = train_dataset_tokenized.map(
-        lambda example: group_texts(example, data_args.block_size),
+        lambda example: group_texts(example, data_args.source_max_len),
         batched=True,
-        num_proc=32,
+        num_proc=max(1, min(os.cpu_count(), int(len(train_dataset_tokenized['input_ids']) / (data_args.source_max_len * 10)))),
         load_from_cache_file=True,
-        desc=f"Grouping texts in chunks of {data_args.block_size}")
+        desc=f"Grouping texts in chunks of {data_args.source_max_len}")

     eval_dataset_tokenized = None
     if eval_dataset is not None:
@@ -248,13 +268,18 @@ def create_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args: D
             lambda example: tokenizer(example['text']),
             batched=True,
             remove_columns='text',
-            num_proc=32)
+            num_proc=os.cpu_count())
         eval_dataset_tokenized = eval_dataset_tokenized.map(
-            lambda example: group_texts(example, data_args.block_size),
+            lambda example: group_texts(example, data_args.source_max_len),
             batched=True,
-            num_proc=32,
+            num_proc=max(1, min(os.cpu_count(), int(len(eval_dataset_tokenized['input_ids']) / (data_args.source_max_len * 10)))),
             load_from_cache_file=True,
-            desc=f"Grouping texts in chunks of {data_args.block_size}")
+            desc=f"Grouping texts in chunks of {data_args.source_max_len}")
+
+    for ids in train_dataset_tokenized['input_ids']:
+        assert len(ids) == data_args.source_max_len
+    if eval_dataset_tokenized is not None:
+        for ids in eval_dataset_tokenized['input_ids']:
+            assert len(ids) == data_args.source_max_len

     return dict(
         train_dataset=train_dataset_tokenized if do_train else None,
@@ -262,3 +287,84 @@ def create_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args: D
         predict_dataset=eval_dataset_tokenized if do_predict else None,
         data_collator=transformers.default_data_collator
     )
+
+
+def create_data_module_chat(tokenizer, data_args, do_train, do_eval, do_predict):
+    try:
+        dataset = datasets.Dataset.from_json(path_or_paths=data_args.dataset)
+    except FileNotFoundError as ex:
+        raise ValueError(f"Error loading dataset from {data_args.dataset}, {ex}")
+
+    if data_args.dataset_chat_template is not None:
+        tokenizer.chat_template = data_args.dataset_chat_template
+
+    target_len = data_args.source_max_len * 0.5
+    grouped_chats = list()
+    last_len = 0
+    for row in tqdm(dataset, desc="Grouping chat messages"):
+        content_length = len(tokenizer(row['content'])['input_ids'])
+        if last_len + content_length <= target_len and len(grouped_chats) > 0:
+            grouped_chats[-1]['chat'].append(row)
+            last_len += content_length
+        else:
+            last_len = content_length
+            grouped_chats.append({'chat': [row]})
+    dataset = datasets.Dataset.from_list(grouped_chats)
+    dataset = dataset.map(lambda x: {"text": tokenizer.apply_chat_template(x["chat"], tokenize=False, add_generation_prompt=False)})
+    dataset = dataset.remove_columns('chat')
+
+    train_dataset = dataset
+    eval_dataset = None
+    if do_eval or do_predict:
+        print('Splitting train dataset into train and validation according to `eval_dataset_size`')
+        dataset_split = dataset.train_test_split(test_size=data_args.eval_dataset_size, shuffle=True)
+        train_dataset = dataset_split["train"]
+        eval_dataset = dataset_split["test"]
+
+    data_collator = DataCollatorForCausalLMText(
+        tokenizer=tokenizer,
+        max_len=data_args.source_max_len,
+    )
+    return dict(
+        train_dataset=train_dataset if do_train else None,
+        eval_dataset=eval_dataset,
+        predict_dataset=eval_dataset if do_predict else None,
+        data_collator=data_collator
+    )
+
+
+def get_data_loaders(tokenizer, data_args: DataArguments, batch_size: int, eval_batch_size: int,
+                     do_train: bool, do_eval: bool, do_predict: bool = False):
+    data_type = DatasetType.from_string(data_args.dataset_type)
+    if data_type == DatasetType.S2S:
+        print("Loading dataset in s2s mode")
+        data_module = create_data_module_s2s(tokenizer, data_args, do_train, do_eval, do_predict)
+    elif data_type == DatasetType.HUB:
+        print("Loading dataset from hub, expecting alpaca style")
+        data_module = create_data_module_hub(tokenizer, data_args, do_train, do_eval, do_predict)
+    elif data_type == DatasetType.TEXT:
+        print("Loading dataset in txt mode")
+        data_module = create_data_module_txt(tokenizer, data_args, do_train, do_eval, do_predict)
+    elif data_type == DatasetType.CHAT:
+        print("Loading dataset in chat mode")
+        data_module = create_data_module_chat(tokenizer, data_args, do_train, do_eval, do_predict)
+    else:
+        raise RuntimeError("Unknown dataset type")
+
+    train_dataloader = None
+    eval_dataloader = None
+
+    if do_train:
+        train_dataloader = torch.utils.data.DataLoader(
+            data_module['train_dataset'],
+            shuffle=True,
+            collate_fn=data_module['data_collator'],
+            batch_size=batch_size
+        )
+    if do_eval:
+        eval_dataloader = torch.utils.data.DataLoader(
+            data_module['eval_dataset'],
+            shuffle=True,
+            collate_fn=data_module['data_collator'],
+            batch_size=eval_batch_size
+        )
+    return train_dataloader, eval_dataloader
diff --git a/dyntrainmodel.py b/dyntrainmodel.py
index e6a1638..68ce7c0 100644
--- a/dyntrainmodel.py
+++ b/dyntrainmodel.py
@@ -1,3 +1,23 @@
+
+# QRotaryTraining - A novel method for fully training all parameters of large
+# language models (llms) while using less device memory than traditional methods.
+# Copyright (C) 2024 Carl Philipp Klemm
+#
+# This file is part of QRotaryTraining.
+#
+# QRotaryTraining is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# QRotaryTraining is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with QRotaryTraining. If not, see <https://www.gnu.org/licenses/>.
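+
+# DyntrainModel wraps a causal LM and partitions its linear layers into groups;
+# only enough groups to reach roughly target_active_params are left unfrozen at
+# any one time, and the set of active groups is periodically reshuffled and
+# rebalanced across the available devices.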
+
 from transformers import AutoModelForCausalLM
 import torch
 from utils import replace_module
@@ -68,7 +88,9 @@ class LinearGroup:


 class DyntrainModel:
     def __init__(self, model_name_or_path: str, cache_dir: str | None, quantize: bool,
-                 target_active_params: int, reshuffle_fraction: float, gradient_checkpointing: bool, trust_remote_code: bool = False):
+                 target_active_params: int, train_static_params: bool,
+                 reshuffle_fraction: float, gradient_checkpointing: bool,
+                 trust_remote_code: bool = False):
         self.model = AutoModelForCausalLM.from_pretrained(
             model_name_or_path,
             cache_dir=cache_dir,
@@ -82,6 +104,7 @@ class DyntrainModel:
             raise RuntimeError("reshuffle_fraction must be between 0.1 and 1.0")
         self.devices = list[torch.device]()
         self.inital_reshufle = True
+        self.train_static_params = train_static_params

         if gradient_checkpointing:
             self.model.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False})
@@ -167,8 +190,14 @@ class DyntrainModel:
     def staticParameterCount(self) -> int:
         return sum(p.numel() for p in self.staticParameters())

+    def activeDynamicParameterCount(self) -> int:
+        return sum(p.numel() for p in self.dynamicParameters() if p.requires_grad)
+
     def activeParameterCount(self) -> int:
-        total_params = self.dynamicParameters() + self.staticParameters()
+        if self.train_static_params:
+            total_params = self.dynamicParameters() + self.staticParameters()
+        else:
+            total_params = self.dynamicParameters()
         return sum(p.numel() for p in total_params if p.requires_grad)

     def getDistanceAndErrorSample(self) -> tuple[torch.Tensor, torch.Tensor]:
@@ -187,7 +216,7 @@ class DyntrainModel:
         params = self.activeParameterCount()

         if params >= self.target_active_params:
-            RuntimeError("Insuficant active parameters to suffle active")
+            raise RuntimeError("Insufficient active parameters to shuffle active")
         while params < self.target_active_params and len(self.frozen_linear_groups) > 0:
             i = randint(0, len(self.frozen_linear_groups) - 1)
             group = self.frozen_linear_groups.pop(i)
@@ -199,7 +228,7 @@ class DyntrainModel:

         active_params = self.activeParameterCount()

-        assert self.target_active_params * 1.3 > active_params and self.target_active_params * 0.7 < active_params
+        assert self.target_active_params * 1.4 > active_params and self.target_active_params * 0.6 < active_params

     def activeParamtersByDevice(self) -> list[int]:
         out = [0] * len(self.devices)
@@ -213,7 +242,7 @@ class DyntrainModel:
         for i, count in enumerate(active_counts):
             memory = torch.cuda.get_device_properties(self.devices[i]).total_memory
             if i == 0:
-                memory = int(memory * 0.8)
+                memory = int(memory * 0.5)
             bits_per_param.append(count / memory)

         max_index, max_bits_per_param = max(enumerate(active_counts), key=lambda x: x[1])
@@ -223,7 +252,7 @@ class DyntrainModel:
             if group.getDevice() is self.devices[max_index]:
                 memory = torch.cuda.get_device_properties(self.devices[max_index]).total_memory
                 if max_index == 0:
-                    memory = int(memory * 0.8)
+                    memory = int(memory * 0.5)
                 swing = group.paramCount() / memory
                 if max_bits_per_param - swing > min_bits_per_param + swing:
                     group.inplaceTo(device=self.devices[min_index])
diff --git a/gpl-3.0.txt b/gpl-3.0.txt
new file mode 100644
index 0000000..f288702
--- /dev/null
+++ b/gpl-3.0.txt
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+ + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. 
+ + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. 
+ + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. 
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/modules.py b/modules.py
index 9ff9dee..061f6a6 100644
--- a/modules.py
+++ b/modules.py
@@ -1,3 +1,23 @@
+
+# QRotaryTraining - A novel method for fully training all parameters of large
+# language models (llms) while using less device memory than traditional methods.
+# Copyright (C) 2024 Carl Philipp Klemm
+#
+# This file is part of QRotaryTraining.
+#
+# QRotaryTraining is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# QRotaryTraining is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with QRotaryTraining. If not, see <https://www.gnu.org/licenses/>.
+
 import torch
 import bitsandbytes as bnb
 import torch.multiprocessing as multiprocessing
@@ -108,7 +128,7 @@ class DynamicConvertingLinear(Linear):

 class DynamicQantizedLinear(Linear):
     def __init__(self, in_features: int, out_features: int, bias: bool, active_device: torch.device, cold_device: torch.device,
-                 output_dtype=None, compute_dtype=None, output_device=None):
+                 output_dtype=None, compute_dtype=None, output_device=None, cold_dtype=torch.float32):
         super().__init__(in_features, out_features, bias, cold_device, torch.float32)
         self.active_device = active_device
         self.cold_device = cold_device
@@ -120,8 +140,8 @@ class DynamicQantizedLinear(Linear):
         self.bias_quantized = None
         self.bias_state = None
         self.block_size = 128
-        self.quant_type = 'nf4'
-        self.weight_start = self.weight.clone().detach()
+        #self.weight_start = self.weight.clone().detach()
+        self.cold_dtype = cold_dtype

     @classmethod
     def fromLinear(cls, in_module: torch.nn.Linear, active_device: torch.device = torch.device("cuda:0"), cold_device: torch.device = torch.device("cpu"),
@@ -131,19 +151,19 @@ class DynamicQantizedLinear(Linear):
                            compute_dtype=compute_dtype, output_device=output_device)
         new_module.weight = torch.nn.Parameter(in_module.weight.to(torch.float32).to(cold_device))
         new_module.bias = torch.nn.Parameter(in_module.bias.to(torch.float32).to(cold_device)) if new_module.bias is not None else None
-        new_module.weight_start = new_module.weight.clone().detach()
+        #new_module.weight_start = new_module.weight.clone().detach()
         return new_module

     def compress(self) -> None:
-        weight = self.weight.contiguous().to(torch.float16).cuda(self.active_device)
+        weight = self.weight.contiguous().to(torch.float16).to(self.active_device)
         self.weight_quantized, self.weight_state = bnb.functional.quantize_blockwise(weight, blocksize=self.block_size)
         if self.bias is not None:
-            bias = self.bias.contiguous().to(torch.float16).cuda(self.active_device)
+            bias = self.bias.contiguous().to(torch.float16).to(self.active_device)
self.bias.contiguous().to(torch.float16).to(self.active_device) self.bias_quantized, self.bias_state = bnb.functional.quantize_blockwise(bias, blocksize=self.block_size) frozen = self.isFrozen() - self.weight = torch.nn.Parameter(self.weight.to(self.cold_device)) - self.bias = torch.nn.Parameter(self.bias.to(self.cold_device)) if self.bias is not None else None + self.weight = torch.nn.Parameter(self.weight.to(self.cold_dtype).to(self.cold_device)) + self.bias = torch.nn.Parameter(self.bias.to(self.cold_dtype).to(self.cold_device)) if self.bias is not None else None self.setFrozen(frozen, False) def decompress(self) -> None: @@ -151,16 +171,16 @@ class DynamicQantizedLinear(Linear): self.weight_state = None self.bias_quantized = None self.bias_state = None - self.weight_start = self.weight.clone().detach().to(self.cold_device) - self.weight = torch.nn.Parameter(self.weight.to(self.active_device)) + #self.weight_start = self.weight.clone().detach().to(self.cold_device) + self.weight = torch.nn.Parameter(self.weight.to(self.active_device).to(torch.float32)) if self.bias_quantized: - self.bias = torch.nn.Parameter(self.bias.to(self.active_device)) + self.bias = torch.nn.Parameter(self.bias.to(self.active_device).to(torch.float32)) def getDistanceAndError(self) -> tuple[torch.Tensor, torch.Tensor]: original_weight = self.weight.contiguous().to(self.active_device).to(torch.float16) quantized_original_weight, quantized_original_state = bnb.functional.quantize_blockwise(original_weight, blocksize=self.block_size) dequantized_original_weight = bnb.functional.dequantize_blockwise(quantized_original_weight, quantized_original_state).to(original_weight.dtype) - distance = (self.weight_start - self.weight.to(self.cold_device)).to(torch.float32) + distance = torch.zeros((2)) #(self.weight_start - self.weight.to(self.cold_device)).to(torch.float32) error = (dequantized_original_weight - original_weight).to(torch.float32) return (distance, error) diff --git a/tokenizer.py b/tokenizer.py index c16f3df..cf8a04a 100644 --- a/tokenizer.py +++ b/tokenizer.py @@ -1,3 +1,23 @@ + +# QRotaryTraining - A novel method for fully training all parameters of large +# language models (llms) while using less device memory than traditional methods. +# Copyright (C) 2024 Carl Philipp Klemm +# +# This file is part of QRotaryTraining. +# +# QRotaryTraining is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# QRotaryTraining is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with QRotaryTraining. If not, see . 
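A note on the compress()/decompress() path above: the module quantizes fp16 weights blockwise on the active device while keeping a master copy (now castable via the new cold_dtype knob) on the cold device. Below is a minimal, self-contained sketch of that round trip, assuming a CUDA device with bitsandbytes installed; the tensor shape and variable names are invented for illustration:

```python
import torch
import bitsandbytes as bnb

block_size = 128  # same value DynamicQantizedLinear hardcodes

weight = torch.randn(1024, 1024, dtype=torch.float32)

# "compress": cast to fp16, move to the active device, quantize blockwise
weight_fp16 = weight.contiguous().to(torch.float16).to("cuda:0")
weight_quantized, weight_state = bnb.functional.quantize_blockwise(weight_fp16, blocksize=block_size)

# "decompress": dequantize and continue in fp32, as decompress() now does
weight_restored = bnb.functional.dequantize_blockwise(weight_quantized, weight_state).to(torch.float32)

# quantization error, analogous to the error half of getDistanceAndError()
error = (weight_restored - weight_fp16.to(torch.float32)).abs().mean()
print(f"mean abs quantization error: {error.item():.6f}")
```

A smaller blocksize stores more per-block absmax metadata but lowers the per-block quantization error, which is presumably why the module uses 128 rather than the bitsandbytes default.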
diff --git a/tokenizer.py b/tokenizer.py
index c16f3df..cf8a04a 100644
--- a/tokenizer.py
+++ b/tokenizer.py
@@ -1,3 +1,23 @@
+
+# QRotaryTraining - A novel method for fully training all parameters of large
+# language models (llms) while using less device memory than traditional methods.
+# Copyright (C) 2024 Carl Philipp Klemm
+#
+# This file is part of QRotaryTraining.
+#
+# QRotaryTraining is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# QRotaryTraining is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with QRotaryTraining. If not, see <https://www.gnu.org/licenses/>.
+
 import transformers

 from arguments import ModelArguments
@@ -30,13 +50,13 @@ def smart_tokenizer_and_embedding_resize(


 def get_tokenizer(model, cache_dir, model_args: ModelArguments):
-    print(f'Tokenizer: {model_args.tokenizer if model_args.tokenizer is not None else model_args.model_name_or_path}')
+    tokenizer_path = model_args.tokenizer if model_args.tokenizer is not None else model_args.model_name_or_path
+    print(f'Tokenizer: {tokenizer_path}')
     tokenizer = transformers.AutoTokenizer.from_pretrained(
-        model_args.tokenizer if model_args.tokenizer is not None else model_args.model_name_or_path,
+        tokenizer_path,
         cache_dir=cache_dir,
         padding_side="right",
         use_fast=False,
-        eos_token="[EOS]",
         tokenizer_type='llama' if 'llama' in model_args.model_name_or_path else None,
         trust_remote_code=model_args.trust_remote_code
     )
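Dropping the hard-coded eos_token="[EOS]" means the tokenizer now keeps the checkpoint's own EOS token, leaving any genuinely missing special tokens to smart_tokenizer_and_embedding_resize. For reference, the usual shape of that helper is sketched below; this is the generic recipe rather than necessarily this repo's exact implementation, and the "[PAD]" token and checkpoint name are illustrative:

```python
import transformers

model_name = "huggyllama/llama-7b"  # example checkpoint; any causal LM works
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, padding_side="right", use_fast=False)
model = transformers.AutoModelForCausalLM.from_pretrained(model_name)

if tokenizer.pad_token is None:
    # add the missing special token and grow the embedding matrix to match
    num_added = tokenizer.add_special_tokens({"pad_token": "[PAD]"})
    model.resize_token_embeddings(len(tokenizer))
    if num_added > 0:
        # common trick: initialize the new rows with the mean of existing embeddings
        embeddings = model.get_input_embeddings().weight.data
        embeddings[-num_added:] = embeddings[:-num_added].mean(dim=0, keepdim=True)
```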
diff --git a/train_dynamic.py b/train_dynamic.py
index 96ff497..0a64e19 100644
--- a/train_dynamic.py
+++ b/train_dynamic.py
@@ -1,6 +1,24 @@
-import transformers
-from transformers import get_scheduler
+# QRotaryTraining - A novel method for fully training all parameters of large
+# language models (llms) while using less device memory than traditional methods.
+# Copyright (C) 2024 Carl Philipp Klemm
+#
+# This file is part of QRotaryTraining.
+#
+# QRotaryTraining is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# QRotaryTraining is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with QRotaryTraining. If not, see <https://www.gnu.org/licenses/>.
+
+import transformers
 import torch
 from torch.utils import tensorboard
 import os
@@ -8,9 +26,10 @@ import shutil
 import math
 from tqdm.auto import tqdm
 import gc
+import sys

 from arguments import DataArguments, ModelArguments, TrainingArguments
-from datamodules import create_data_module_s2s, create_data_module, create_data_module_hub
+from datamodules import get_data_loaders
 from tokenizer import get_tokenizer

 from dyntrainmodel import DyntrainModel
@@ -19,7 +38,16 @@ from dyntrainmodel import DyntrainModel
 def save_model(model, global_step: int, output_dir: str, max_checkpoints: int = 0):
     output_chkpt_dir = f"step_{global_step}" if global_step >= 0 else ""
     output_dir = os.path.join(output_dir, output_chkpt_dir)
+
+    print(f"saving model to {output_chkpt_dir}")
+
+    temperature = model.generation_config.temperature
+    top_p = model.generation_config.top_p
+    model.generation_config.temperature = None
+    model.generation_config.top_p = None
     model.save_pretrained(output_dir)
+    model.generation_config.temperature = temperature
+    model.generation_config.top_p = top_p

     if max_checkpoints > 0:
         files = [f for f in os.listdir(output_dir) if os.path.isdir(os.path.join(output_dir, f)) and f.startswith("step_")]
@@ -57,37 +85,85 @@ def get_optimizer(dyamic_parameters: list[torch.nn.Parameter], static_parameters
     return optimizer


+def move_optimizer_param(param, device: torch.device, device_map: dict):
+    if isinstance(param, torch.Tensor):
+        move_device = device if device is not None else device_map[id(param)]
+        assert device is not None or move_device != torch.device("cpu")
+        old_device = param.device
+        param.data = param.data.to(move_device)
+        if param._grad is not None:
+            param._grad.data = param._grad.data.to(move_device)
+        if device is not None and id(param) not in device_map:
+            device_map[id(param)] = old_device
+            assert old_device != torch.device("cpu")
+    elif isinstance(param, dict):
+        for subparam in param.values():
+            move_optimizer_param(subparam, device, device_map)
+
+
+def suspend_optimizer(optimizer) -> dict:
+    device_map = dict()
+    for param in optimizer.state.values():
+        move_optimizer_param(param, torch.device("cpu"), device_map)
+    return device_map
+
+
+def resume_optimizer(optimizer, device_map: dict):
+    for param in optimizer.state.values():
+        move_optimizer_param(param, None, device_map)
+
+
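suspend_optimizer()/resume_optimizer() walk optimizer.state recursively and park every state tensor (and its gradient, if present) in host memory, remembering each tensor's home device by id so it can be restored later. A self-contained sketch of the same pattern with a toy model, stock AdamW, and simplified bookkeeping (it does not reuse the helpers above, so the details differ):

```python
import torch

model = torch.nn.Linear(512, 512).to("cuda:0")
optimizer = torch.optim.AdamW(model.parameters())
model(torch.randn(4, 512, device="cuda:0")).sum().backward()
optimizer.step()  # materializes exp_avg/exp_avg_sq state tensors on the GPU

# suspend: move every CUDA state tensor to the CPU, remembering where it lived
saved = {}
for state in optimizer.state.values():
    for name, value in state.items():
        if isinstance(value, torch.Tensor) and value.is_cuda:
            saved[(id(state), name)] = value.device
            state[name] = value.to("cpu")

# ... run evaluation / generation here with the freed VRAM ...

# resume: move the tensors back to their original devices
for state in optimizer.state.values():
    for name, value in state.items():
        key = (id(state), name)
        if key in saved:
            state[name] = value.to(saved[key])
```

For a large model trained with AdamW, the two fp32 moment tensors roughly double the memory footprint of the trainable parameters, so parking them during eval-time generation frees a substantial amount of VRAM.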
 def evaluate(model: DyntrainModel, tokenizer, dataloader: torch.utils.data.DataLoader, globalstep: int,
-             log_writer: tensorboard.SummaryWriter, eval_prompt: str = None):
-    print("*** Eval ***")
-    loss = torch.zeros((1), device="cuda:0")
-    model.model.eval()
-    for batch in dataloader:
-        for key in batch:
-            batch[key] = batch[key].to("cuda:0")
-        outputs = model.model(**batch)
-        loss += outputs.loss
-    loss = loss / len(dataloader)
-    log_writer.add_scalar("Loss/Eval", loss, globalstep)
-    print(f"Eval Loss {loss.item()}")
-    return loss.item()
+             log_writer: tensorboard.SummaryWriter, eval_prompt: str | None = None):
+    with torch.no_grad():
+        loss = torch.zeros((1), device="cuda:0")
+        model.model.eval()

-    if eval_prompt is not None:
-        input_ids = tokenizer(eval_prompt, return_tensors="pt").input_ids.to(model.devices[0])
-        attention_mask = torch.ones(input_ids.shape, device=model.devices[0], requires_grad=False)
-        outputs = model.generate(input_ids, attention_mask=attention_mask, do_sample=True, temperature=1, max_new_tokens=100)
-        response_decoded = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
-        print(f"Eval generation: response_decoded")
-        log_writer.add_text("Text/Eval", response_decoded, globalstep)
+        for batch in tqdm(dataloader, desc="Doing eval"):
+            for key in batch:
+                batch[key] = batch[key].to("cuda:0")
+            outputs = model.model(**batch)
+            loss += outputs.loss
+        loss = loss / len(dataloader)
+        log_writer.add_scalar("Loss/Eval", loss, globalstep)
+        print(f"Eval Loss {loss.item()}")
+
+        if eval_prompt is not None:
+            input_ids = tokenizer(eval_prompt, return_tensors="pt").input_ids.to(model.devices[0])
+            attention_mask = torch.ones(input_ids.shape, device=model.devices[0], requires_grad=False)
+            outputs = model.model.generate(input_ids, attention_mask=attention_mask, do_sample=True, temperature=1,
+                                           max_new_tokens=100, min_new_tokens=100)
+            response_decoded = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
+            print(f"Eval generation: {response_decoded}")
+            log_writer.add_text("Text/Eval", response_decoded, globalstep)
+    model.model.train()
+
+
+def max_vram_allocated():
+    max_vram_alloc = 0
+    for i in range(0, torch.cuda.device_count()):
+        max_vram_alloc = max(torch.cuda.memory_allocated(i), max_vram_alloc)
+    return max_vram_alloc
+
+
+def min_vram_allocated():
+    min_vram_alloc = sys.maxsize
+    for i in range(0, torch.cuda.device_count()):
+        min_vram_alloc = min(torch.cuda.memory_allocated(i), min_vram_alloc)
+    return min_vram_alloc


 def train(model_args: ModelArguments, data_args: DataArguments, training_args: TrainingArguments):
-    log_writer = tensorboard.SummaryWriter()
+    log_writer = tensorboard.SummaryWriter(log_dir=training_args.logging_dir)

-    model = DyntrainModel(model_args.model_name_or_path, training_args.cache_dir, target_active_params=int(training_args.max_instant_params * 1e6),
-                          reshuffle_fraction=training_args.churn_percent / 100.0, gradient_checkpointing=True, trust_remote_code=True,
-                          quantize=model_args.quantize)
+    model = DyntrainModel(model_args.model_name_or_path, training_args.cache_dir,
+                          quantize=model_args.quantize,
+                          target_active_params=int(training_args.max_instant_params * 1e6),
+                          train_static_params=training_args.train_non_linear_layers,
+                          reshuffle_fraction=training_args.churn_percent / 100.0,
+                          gradient_checkpointing=True,
+                          trust_remote_code=True)
     devices = list(torch.device(i) for i in range(0, torch.cuda.device_count()))
     model.toDevices(devices)
     model.reshuffleActive()
@@ -96,32 +172,15 @@ def train(model_args: ModelArguments, data_args: DataArguments, training_args: T
     paramter_count = sum(p.numel() for p in model.model.parameters())
     active_paramter_count = sum(p.numel() for p in model.model.parameters() if p.requires_grad)
     static_parameter_count = model.staticParameterCount() if training_args.train_non_linear_layers else 0
-    print(f"Training model with {paramter_count / 1e6}m parameters and {active_paramter_count / 1e6}m"
+    print(f"Training model with {paramter_count / 1e6}m parameters and {active_paramter_count / 1e6}m "
           f"instantanous active paramters of which {static_parameter_count} are static")

     tokenizer = get_tokenizer(model.model, training_args.cache_dir, model_args)

-    if data_args.dataset.endswith("json"):
-        print("Loading dataset in s2s mode")
-        data_module = create_data_module_s2s(tokenizer, data_args, training_args.do_train, training_args.do_eval, False)
-    elif data_args.data_from_hub:
-        data_module = create_data_module_hub(tokenizer, data_args, training_args.do_train, training_args.do_eval, False)
-    else:
-        print("Loading dataset in txt mode")
-        data_module = create_data_module(tokenizer, data_args, training_args.do_train, training_args.do_eval, False)
-    dataset = {k: v for k, v in data_module.items() if k != 'predict_dataset'}
-    train_dataloader = torch.utils.data.DataLoader(
-        dataset['train_dataset'],
-        shuffle=True,
-        collate_fn=dataset['data_collator'],
-        batch_size=training_args.per_device_train_batch_size
-    ) if dataset['train_dataset'] is not None else None
-    eval_dataloader = torch.utils.data.DataLoader(
-        dataset['eval_dataset'],
-        shuffle=True,
-        collate_fn=dataset['data_collator'],
-        batch_size=training_args.per_device_train_batch_size
-    ) if dataset['eval_dataset'] is not None else None
+    train_dataloader, eval_dataloader = get_data_loaders(tokenizer, data_args,
+                                                         training_args.per_device_train_batch_size,
+                                                         training_args.per_device_eval_batch_size,
+                                                         training_args.do_train, training_args.do_eval)

     dynamic_param_ratio = (model.staticParameterCount() + model.dynamicParameterCount()) / model.dynamicParameterCount()
     steps_per_epoch = math.ceil(len(train_dataloader) / training_args.gradient_accumulation_steps) if train_dataloader is not None else 1
@@ -135,7 +194,7 @@ def train(model_args: ModelArguments, data_args: DataArguments, training_args: T
                               training_args.adam_epsilon,
                               training_args.adam8bit)

-    lr_scheduler = get_scheduler(
+    lr_scheduler = transformers.get_scheduler(
         name=training_args.lr_scheduler_type,
         optimizer=optimizer,
         num_warmup_steps=training_args.warmup_steps,
@@ -147,13 +206,11 @@ def train(model_args: ModelArguments, data_args: DataArguments, training_args: T
     global_step = 0
     model.model.train()
     for epoch in range(0, training_args.epochs):
-        model.model.train()
         print("*** Train ***")
-        print(f'Vram used for model before training starts: {torch.cuda.memory_allocated()/(1024.0*1024.0)}')
+        print(f'Vram used for model before training starts: {torch.cuda.memory_allocated()/(1024.0**3):.2f}GiB')
         for step, batch in enumerate(train_dataloader):
             for key in batch:
                 batch[key] = batch[key].to("cuda:0")
-
             outputs = model.model(**batch)
             loss = outputs.loss / training_args.gradient_accumulation_steps
             loss.backward()
@@ -164,48 +221,54 @@ def train(model_args: ModelArguments, data_args: DataArguments, training_args: T
                 optimizer.step()
                 lr_scheduler.step()

+                progress_bar.set_postfix_str(f"Loss: {loss.item():.2f} Max: {max_vram_allocated()/(1024.0**3):.2f}GB"
+                                             f" Min: {min_vram_allocated()/(1024.0**3):.2f}GB")
+
                 model.model.zero_grad()

-            if global_step % 5 == 0:
-                print(f"Train Loss {loss.item()}")
+                if global_step > 0:
+                    if global_step % training_args.reshufle_steps == 0 and training_args.max_instant_params != 0:
+                        print("Reshuffling")
+                        lr_scheduler.optimizer = None
+                        del optimizer
+                        # distance, error = model.getDistanceAndErrorSample()
+                        # log_writer.add_histogram("Distances/Train", distance, max_bins=50)
+                        # log_writer.add_histogram("Errors/Train", error, max_bins=50)

-            if global_step % training_args.reshufle_steps == 0 and training_args.max_instant_params != 0:
-                print("Reshuffleing")
-                lr_scheduler.optimizer = None
-                del optimizer
-                # distance, error = model.getDistanceAndErrorSample()
-                # log_writer.add_histogram("Distances/Train", distance, max_bins=50)
-                # log_writer.add_histogram("Errors/Train", error, max_bins=50)
+                        model.reshuffleActive()
+                        model.balanceActive()
+                        log_writer.add_scalar("Parameters/train", model.activeParameterCount(), global_step)
+                        optimizer = get_optimizer(model.dynamicParameters(),
+                                                  model.staticParameters() if training_args.train_non_linear_layers else None,
+                                                  training_args.learning_rate,
+                                                  training_args.learning_rate / dynamic_param_ratio,
+                                                  training_args.weight_decay,
+                                                  training_args.adam_epsilon,
+                                                  training_args.adam8bit)
+                        lr_scheduler.optimizer = optimizer

-                model.reshuffleActive()
-                model.balanceActive()
-                log_writer.add_scalar("Parameters/train", model.activeParameterCount(), global_step)
-                optimizer = get_optimizer(model.dynamicParameters(),
-                                          model.staticParameters() if training_args.train_non_linear_layers else None,
-                                          training_args.learning_rate,
-                                          training_args.learning_rate / dynamic_param_ratio,
-                                          training_args.weight_decay,
-                                          training_args.adam_epsilon,
-                                          training_args.adam8bit)
-                lr_scheduler.optimizer = optimizer
+                    if global_step % training_args.save_steps == 0:
+                        save_model(model.model, global_step, training_args.output_dir, training_args.max_checkpoints)
+                    if training_args.eval_steps > 0 and global_step % training_args.eval_steps == 0:
+                        device_map = suspend_optimizer(optimizer)
+                        evaluate(model, tokenizer, eval_dataloader, global_step, log_writer, training_args.eval_prompt)
+                        resume_optimizer(optimizer, device_map)

                 global_step += 1
                 progress_bar.update()

-            if global_step > 0:
-                if global_step % training_args.save_steps == 0:
-                    save_model(model.model, global_step, training_args.output_dir, training_args.max_checkpoints)
-                if training_args.eval_steps > 0 and global_step % training_args.save_steps == 0:
-                    evaluate(model, eval_dataloader, global_step, log_writer, training_args.eval_prompt)
             if training_args.flush_allocator:
                 gc.collect()
                 torch.cuda.empty_cache()

     if training_args.do_eval and training_args.eval_steps == -1:
-        evaluate(model, eval_dataloader, global_step, log_writer, training_args.eval_prompt)
+        device_map = suspend_optimizer(optimizer)
+        evaluate(model, tokenizer, eval_dataloader, global_step, log_writer, training_args.eval_prompt)
+        resume_optimizer(optimizer, device_map)
+
+    del optimizer

-    # Evaluation
     if training_args.do_eval:
-        evaluate(model, eval_dataloader, global_step, log_writer, training_args.eval_prompt)
+        evaluate(model, tokenizer, eval_dataloader, global_step, log_writer, training_args.eval_prompt)

     save_model(model.model, global_step, training_args.output_dir)
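The temperature/top_p juggling in save_model() above exists because recent transformers releases validate the GenerationConfig on save and reject sampling parameters that are set while do_sample is False; nulling them for the duration of save_pretrained() sidesteps that. The same workaround on a generic model, with a hypothetical output path (wrapping the save in try/finally additionally guards against a failed save leaving the config mutated):

```python
import transformers

model = transformers.AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b")  # example checkpoint

temperature = model.generation_config.temperature
top_p = model.generation_config.top_p
model.generation_config.temperature = None
model.generation_config.top_p = None
try:
    model.save_pretrained("./output/step_100")  # hypothetical checkpoint path
finally:
    # restore so later evaluate() generations keep their sampling settings
    model.generation_config.temperature = temperature
    model.generation_config.top_p = top_p
```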
diff --git a/tune.sh b/tune.sh
new file mode 100755
index 0000000..2a59aae
--- /dev/null
+++ b/tune.sh
@@ -0,0 +1,54 @@
+#!/bin/sh
+#
+# QRotaryTraining - A novel method for fully training all parameters of large
+# language models (llms) while using less device memory than traditional methods.
+# Copyright (C) 2024 Carl Philipp Klemm
+#
+# This file is part of QRotaryTraining.
+#
+# QRotaryTraining is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# QRotaryTraining is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with QRotaryTraining. If not, see <https://www.gnu.org/licenses/>.
+#
+
+BASE_DIR=$(dirname "$0")
+VENV_DIR=$(venvget)
+
+export MAX_JOBS=48
+
+export ROCR_VISIBLE_DEVICES="1,2"
+. "$VENV_DIR/bin/activate"
+
+python $SCRIPTS/train_dyamic/train_dynamic.py \
+    --model_name_or_path "huggyllama/llama-7b" \
+    --dataset "tatsu-lab/alpaca" \
+    --dataset_type "hub" \
+    --eval_dataset_size 200 \
+    --source_max_len 1024 \
+    --do_train \
+    --do_eval \
+    --eval_steps 100 \
+    --reshufle_steps 50 \
+    --per_device_train_batch_size 2 \
+    --per_device_eval_batch_size 1 \
+    --gradient_checkpointing True \
+    --gradient_accumulation_steps 4 \
+    --epochs 3 \
+    --logging_dir $BASE_DIR/log \
+    --logging_steps 5 \
+    --learning_rate 1e-6 \
+    --save_steps 500 \
+    --output_dir $BASE_DIR/llama-7b-quant \
+    --adam8bit \
+    --churn_percent 100 \
+    --max_instant_params 3000 \
+    --quantize
diff --git a/utils.py b/utils.py
index c58bc06..8f99d16 100644
--- a/utils.py
+++ b/utils.py
@@ -1,3 +1,23 @@
+
+# QRotaryTraining - A novel method for fully training all parameters of large
+# language models (llms) while using less device memory than traditional methods.
+# Copyright (C) 2024 Carl Philipp Klemm
+#
+# This file is part of QRotaryTraining.
+#
+# QRotaryTraining is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# QRotaryTraining is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with QRotaryTraining. If not, see <https://www.gnu.org/licenses/>.
+
 from peft.utils import _get_submodules
 import torch
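For context on the utils.py import: peft's _get_submodules resolves a dotted module name into (parent, target, target_name), which is the usual handle for swapping a layer in place, for example when installing the dynamic Linear replacements from modules.py. A small sketch; the model and key are toy stand-ins rather than this repo's actual usage:

```python
import torch
from peft.utils import _get_submodules

model = torch.nn.Sequential(torch.nn.Linear(8, 8))

# resolve the submodule "0" into its parent, the module itself, and its name
parent, target, target_name = _get_submodules(model, "0")

# replace it in place, e.g. with a bias-free variant
setattr(parent, target_name, torch.nn.Linear(8, 8, bias=False))
```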