# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import warnings
from dataclasses import asdict
from enum import Enum
from typing import Optional

import torch
import torch.nn as nn
from tqdm import tqdm
from transformers.pytorch_utils import Conv1D

from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _get_submodules

from .config import VBLoRAConfig
from .layer import Linear, VBLoRALayer
class VBLoRAModel(BaseTuner):
    """
    Creates a VBLoRA model from a pretrained transformers model.

    The method is described in detail in https://arxiv.org/abs/2405.15179.

    Args:
        model ([`~transformers.PreTrainedModel`]): The model to be adapted.
        config ([`VBLoRAConfig`]): The configuration of the VBLoRA model.
        adapter_name (`str`): The name of the adapter, defaults to `"default"`.
        low_cpu_mem_usage (`bool`, `optional`, defaults to `False`):
            Create empty adapter weights on meta device. Useful to speed up the loading process.

    Returns:
        `torch.nn.Module`: The VBLoRA model.

    Example:

        ```py
        >>> from transformers import AutoModelForCausalLM
        >>> from peft import VBLoRAConfig, get_peft_model

        >>> base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
        >>> config = VBLoRAConfig(
        ...     task_type="SEQ_CLS",
        ...     r=4,
        ...     target_modules=["fc1", "fc2", "k_proj", "out_proj", "q_proj", "v_proj"],
        ...     num_vectors=60,
        ...     vector_length=256,
        ...     save_only_topk_weights=True,
        ... )
        >>> model = get_peft_model(base_model, config)
        ```

    **Attributes**:
        - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
        - **peft_config** ([`VBLoRAConfig`]): The configuration of the VBLoRA model.
    """

    prefix: str = "vblora_"

    def __init__(self, model, config, adapter_name, low_cpu_mem_usage: bool = False) -> None:
        super().__init__(model, config, adapter_name, low_cpu_mem_usage=low_cpu_mem_usage)

    def _init_vblora_vector_bank(self, config: VBLoRAConfig, adapter_name: str) -> None:
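        # Note: the vector bank is a single ParameterDict shared by every adapted layer. Each adapter name maps to
        # one bank of shape (num_vectors, vector_length), initialized uniformly in
        # [-init_vector_bank_bound, init_vector_bank_bound].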
        vblora_vector_bank = torch.zeros(config.num_vectors, config.vector_length)
        torch.nn.init.uniform_(vblora_vector_bank, -config.init_vector_bank_bound, config.init_vector_bank_bound)
        self.vblora_vector_bank[adapter_name] = vblora_vector_bank

    def _pre_injection_hook(self, model: nn.Module, config: VBLoRAConfig, adapter_name: str) -> None:
        self.vblora_vector_bank = nn.ParameterDict({})

    def _check_new_adapter_config(self, config: VBLoRAConfig) -> None:
        """
        A helper method to check the config when a new adapter is being added.

        Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
        """
        # the below todo is copied from LoRA
        # TODO: there should be a check if any of the existing adapters actually has bias != "none", or else the check
        # does not fully correspond to the error message.
        if (len(self.peft_config) > 1) and (config.bias != "none"):
            raise ValueError(
                f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, "
                "set bias to 'none' for all adapters."
            )

    @staticmethod
    def _check_target_module_exists(vblora_config, key):
        return check_target_module_exists(vblora_config, key)

    def _create_and_replace(
        self,
        vblora_config,
        adapter_name,
        target,
        target_name,
        parent,
        current_key,
    ):
        if current_key is None:
            raise ValueError("Current Key shouldn't be `None`")

        bias = hasattr(target, "bias") and target.bias is not None
        kwargs = {
            "fan_in_fan_out": vblora_config.fan_in_fan_out,
            "bias": bias,
        }
        self._init_vblora_vector_bank(vblora_config, adapter_name)
        # TODO: add quantization support
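        # If the target is already a VB-LoRA linear layer, register the new adapter on it; otherwise wrap the
        # original module in a new VB-LoRA Linear and swap it into the parent module.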
        if isinstance(target, Linear):
            target.update_layer(
                adapter_name=adapter_name,
                vblora_vector_bank=self.vblora_vector_bank,
                r=vblora_config.r,
                topk=vblora_config.topk,
                num_vectors=vblora_config.num_vectors,
                vector_length=vblora_config.vector_length,
                vblora_dropout=vblora_config.vblora_dropout,
                init_logits_std=vblora_config.init_logits_std,
            )
        else:
            new_module = self._create_new_module(
                vblora_config=vblora_config,
                vblora_vector_bank=self.vblora_vector_bank,
                adapter_name=adapter_name,
                target=target,
                **kwargs,
            )
            if adapter_name not in self.active_adapter:
                # adding an additional adapter: it is not automatically trainable
                new_module.requires_grad_(False)
            self._replace_module(parent, target_name, new_module, target)

    @staticmethod
    def _replace_module(parent, child_name, new_module, child):
        setattr(parent, child_name, new_module)
        # It's not necessary to set requires_grad here, as that is handled by
        # _mark_only_adapters_as_trainable

        # child layer wraps the original module, unpack it
        if hasattr(child, "base_layer"):
            child = child.base_layer

        if not hasattr(new_module, "base_layer"):
            new_module.weight = child.weight
            if hasattr(child, "bias"):
                new_module.bias = child.bias

        if getattr(child, "state", None) is not None:
            if hasattr(new_module, "base_layer"):
                new_module.base_layer.state = child.state
            else:
                new_module.state = child.state
            new_module.to(child.weight.device)

        meta = torch.device("meta")
        # dispatch to correct device
        for name, module in new_module.named_modules():
            if "vblora_" in name:
                if not any(p.device == meta for p in module.parameters()):
                    module.to(child.weight.device)

    def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
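        # Freeze everything that is not a VB-LoRA parameter, then optionally re-enable gradients for bias terms
        # according to each active adapter's `bias` setting ("none", "all", or "vblora_only").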
        for n, p in model.named_parameters():
            if self.prefix not in n:
                p.requires_grad = False

        for active_adapter in self.active_adapters:
            bias = self.peft_config[active_adapter].bias
            if bias == "none":
                continue

            if bias == "all":
                for n, p in model.named_parameters():
                    if "bias" in n:
                        p.requires_grad = True
            elif bias == "vblora_only":
                for m in model.modules():
                    if isinstance(m, VBLoRALayer) and hasattr(m, "bias") and m.bias is not None:
                        m.bias.requires_grad = True
            else:
                raise NotImplementedError(f"Requested bias: {bias}, is not implemented.")

    @staticmethod
    def _create_new_module(vblora_config, vblora_vector_bank, adapter_name, target, **kwargs):
        if isinstance(target, BaseTunerLayer):
            target_base_layer = target.get_base_layer()
        else:
            target_base_layer = target

        if isinstance(target_base_layer, torch.nn.Linear):
            if kwargs["fan_in_fan_out"]:
                warnings.warn(
                    "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
                    "Setting fan_in_fan_out to False."
                )
                kwargs["fan_in_fan_out"] = vblora_config.fan_in_fan_out = False
        elif isinstance(target_base_layer, Conv1D):
            kwargs["is_target_conv_1d_layer"] = True
            if not kwargs["fan_in_fan_out"]:
                warnings.warn(
                    "fan_in_fan_out is set to False but the target module is `Conv1D`. "
                    "Setting fan_in_fan_out to True."
                )
                kwargs["fan_in_fan_out"] = vblora_config.fan_in_fan_out = True
        else:
            raise ValueError(
                f"Target module {target} is not supported. Currently, only the following modules are supported: "
                "`torch.nn.Linear`, `transformers.pytorch_utils.Conv1D`."
            )
        new_module = Linear(
            base_layer=target,
            vblora_vector_bank=vblora_vector_bank,
            adapter_name=adapter_name,
            r=vblora_config.r,
            num_vectors=vblora_config.num_vectors,
            vector_length=vblora_config.vector_length,
            topk=vblora_config.topk,
            vblora_dropout=vblora_config.vblora_dropout,
            init_logits_std=vblora_config.init_logits_std,
            **kwargs,
        )
        return new_module

    def __getattr__(self, name: str):
        """Forward missing attributes to the wrapped module."""
        try:
            return super().__getattr__(name)  # defer to nn.Module's logic
        except AttributeError:
            if name == "model":  # see #1892: prevent infinite recursion if class is not initialized
                raise
            return getattr(self.model, name)

    def get_peft_config_as_dict(self, inference: bool = False):
        config_dict = {}
        for key, value in self.peft_config.items():
            config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}
            if inference:
                config["inference_mode"] = True
            config_dict[key] = config
        return config_dict

    def _set_adapter_layers(self, enabled: bool = True) -> None:
        for module in self.model.modules():
            if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
                module.enable_adapters(enabled)

    def enable_adapter_layers(self) -> None:
        """Enable all adapters.

        Call this if you have previously disabled all adapters and want to re-enable them.
        """
        self._set_adapter_layers(enabled=True)

    def disable_adapter_layers(self) -> None:
        """Disable all adapters.

        When disabling all adapters, the model output corresponds to the output of the base model.
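
        Example (a minimal sketch, assuming `model` was created with `get_peft_model` as in the class docstring):

        ```py
        >>> model.disable_adapter_layers()  # outputs now come from the base model only
        >>> model.enable_adapter_layers()  # turn the VB-LoRA adapters back on
        ```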
""" | |
for active_adapter in self.active_adapters: | |
val = self.peft_config[active_adapter].bias | |
if val != "none": | |
msg = ( | |
f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same " | |
"output as the the base model would without adaption." | |
) | |
warnings.warn(msg) | |
self._set_adapter_layers(enabled=False) | |

    def set_adapter(self, adapter_name: str | list[str]) -> None:
        """Set the active adapter(s).

        Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this
        is not desired, use the following code.

        ```py
        >>> for name, param in model_peft.named_parameters():
        ...     if ...:  # some check on name (ex. if 'lora' in name)
        ...         param.requires_grad = False
        ```

        Args:
            adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated.
        """
        for module in self.model.modules():
            if isinstance(module, VBLoRALayer):
                if module.merged:
                    warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
                    module.unmerge()
                module.set_adapter(adapter_name)
        self.active_adapter = adapter_name

    @staticmethod
    def _prepare_adapter_config(peft_config, model_config):
        if peft_config.target_modules is None:
            if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING:
                raise ValueError("Please specify `target_modules` in `peft_config`")
            peft_config.target_modules = set(
                TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING[model_config["model_type"]]
            )
        return peft_config

    def _unload_and_optionally_merge(
        self,
        merge=True,
        progressbar: bool = False,
        safe_merge: bool = False,
        adapter_names: Optional[list[str]] = None,
    ):
        key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
        desc = "Unloading " + ("and merging " if merge else "") + "model"
        for key in tqdm(key_list, disable=not progressbar, desc=desc):
            try:
                parent, target, target_name = _get_submodules(self.model, key)
            except AttributeError:
                continue

            if hasattr(target, "base_layer"):
                if merge:
                    target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
                self._replace_module(parent, target_name, target.get_base_layer(), target)
            elif isinstance(target, ModulesToSaveWrapper):
                # save any additional trainable modules part of `modules_to_save`
                setattr(parent, target_name, target.modules_to_save[target.active_adapter])

        return self.model

    def delete_adapter(self, adapter_name: str) -> None:
        """
        Deletes an existing adapter.

        Args:
            adapter_name (str): Name of the adapter to be deleted.
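
        Example (a minimal sketch; "other_adapter" is a hypothetical adapter name previously added to the model):

        ```py
        >>> model.delete_adapter("other_adapter")
        ```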
""" | |
if adapter_name not in list(self.peft_config.keys()): | |
raise ValueError(f"Adapter {adapter_name} does not exist") | |
del self.peft_config[adapter_name] | |
key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] | |
new_adapter = None | |
for key in key_list: | |
_, target, _ = _get_submodules(self.model, key) | |
if isinstance(target, VBLoRALayer): | |
target.delete_adapter(adapter_name) | |
if new_adapter is None: | |
new_adapter = target.active_adapter[:] | |
self.active_adapter = new_adapter or [] | |

    def merge_and_unload(
        self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None
    ) -> torch.nn.Module:
        r"""
        This method merges the VBLoRA layers into the base model. This is needed if someone wants to use the base
        model as a standalone model.

        Args:
            progressbar (`bool`):
                Whether to show a progressbar indicating the unload and merge process.
            safe_merge (`bool`):
                Whether to activate the safe merging check to detect potential NaNs in the adapter weights.
            adapter_names (`list[str]`, *optional*):
                The list of adapter names that should be merged. If `None`, all active adapters will be merged.
                Defaults to `None`.

        Example:

        ```py
        >>> from transformers import AutoModelForCausalLM
        >>> from peft import PeftModel

        >>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b")
        >>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample"
        >>> model = PeftModel.from_pretrained(base_model, peft_model_id)
        >>> merged_model = model.merge_and_unload()
        ```
        """
        return self._unload_and_optionally_merge(
            progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names
        )

    def unload(self):
        """
        Gets back the base model by removing all the VBLoRA modules without merging. This gives back the original
        base model.
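
        Example (a minimal sketch, assuming a VB-LoRA `model` created with `get_peft_model`):

        ```py
        >>> base_model = model.unload()  # adapters are removed, nothing is merged into the base weights
        ```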
""" | |
return self._unload_and_optionally_merge(merge=False) | |

    def get_nb_savable_parameters(self, adapter="default") -> tuple[int, int]:
        r"""
        Returns the number of savable VB-LoRA parameters and other savable parameters.
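
        Example (a minimal sketch, assuming a VB-LoRA `model` created with `get_peft_model`):

        ```py
        >>> vblora_params, other_params = model.get_nb_savable_parameters()
        ```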
""" | |
logits_params = 0 | |
vector_bank_params = 0 | |
other_params = 0 | |
for name, param in self.named_parameters(): | |
if "vblora_logits" in name: | |
logits_params += param.numel() | |
elif "vblora_vector_bank" in name: | |
vector_bank_params += param.numel() | |
elif param.requires_grad: | |
other_params += param.numel() | |
if self.peft_config[adapter].save_only_topk_weights: | |
num_vectors = self.peft_config[adapter].num_vectors | |
factor = 1 # factor to count float32-equivalent parameters | |
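            # The factor converts index storage into float32-equivalent parameter counts: top-k indices can be
            # stored in the smallest integer dtype that holds `num_vectors` (roughly uint8 / int16 / int32 / int64),
            # i.e. 1, 2, 4, or 8 bytes per index versus 4 bytes for a float32 value.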
            if num_vectors < 2**8:
                factor = 0.25
            elif num_vectors < 2**15:
                factor = 0.5
            elif num_vectors < 2**31:
                factor = 1
            else:
                factor = 2
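            # logits_params / num_vectors is the number of logit rows (one per sub-vector). For each row, only
            # topk - 1 weights need to be saved (the top-k weights are normalized to sum to one, so the last one can
            # be recovered), plus topk indices counted with the dtype factor above.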
            topk_weight_params = (
                logits_params / self.peft_config[adapter].num_vectors * (self.peft_config[adapter].topk - 1)
            )
            topk_indices_params = (
                logits_params / self.peft_config[adapter].num_vectors * self.peft_config[adapter].topk * factor
            )
            vblora_params = int(vector_bank_params + topk_weight_params + topk_indices_params)
        else:
            vblora_params = vector_bank_params + logits_params
        return vblora_params, other_params

    def print_savable_parameters(self) -> None:
        r"""
        Prints the number of savable VB-LoRA parameters and total savable parameters.
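
        Example (a minimal sketch, assuming a VB-LoRA `model` created with `get_peft_model`):

        ```py
        >>> model.print_savable_parameters()
        ```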
""" | |
vblora_params, other_params = self.get_nb_savable_parameters() | |
print( | |
f"VB-LoRA params to-be-saved (float32-equivalent): {vblora_params:,d} " | |
f"|| total params to-be-saved: {(vblora_params + other_params):,d}" | |
) | |