Libraries & Versions:
Package Version
absl-py 1.4.0
accelerate 0.21.0
aiohttp 3.8.5
aiosignal 1.3.1
anyio 3.7.1
argon2-cffi 21.3.0
argon2-cffi-bindings 21.2.0
asttokens 2.2.1
astunparse 1.6.3
async-lru 2.0.4
async-timeout 4.0.2
attrs 23.1.0
Babel 2.12.1
backcall 0.2.0
beautifulsoup4 4.12.2
bitsandbytes 0.40.2
bleach 6.0.0
cachetools 5.3.1
certifi 2023.7.22
cffi 1.15.1
charset-normalizer 3.2.0
cmake 3.27.1
comm 0.1.4
datasets 2.14.3
debugpy 1.6.7
decorator 5.1.1
defusedxml 0.7.1
dill 0.3.7
exceptiongroup 1.1.2
executing 1.2.0
fastjsonschema 2.18.0
filelock 3.12.2
flatbuffers 23.5.26
frozenlist 1.4.0
fsspec 2023.6.0
gast 0.4.0
google-auth 2.22.0
google-auth-oauthlib 0.4.6
google-pasta 0.2.0
grpcio 1.56.2
h5py 3.9.0
huggingface-hub 0.16.4
idna 3.4
importlib-metadata 6.8.0
importlib-resources 6.0.1
ipykernel 6.25.0
ipython 8.12.2
ipython-genutils 0.2.0
ipywidgets 8.1.0
jedi 0.19.0
Jinja2 3.1.2
json5 0.9.14
jsonschema 4.18.6
jsonschema-specifications 2023.7.1
jupyter 1.0.0
jupyter-client 8.3.0
jupyter-console 6.6.3
jupyter-core 5.3.1
jupyter-events 0.7.0
jupyter-lsp 2.2.0
jupyter-server 2.7.0
jupyter-server-terminals 0.4.4
jupyterlab 4.0.4
jupyterlab-pygments 0.2.2
jupyterlab-server 2.24.0
jupyterlab-widgets 3.0.8
keras 2.10.0
Keras-Preprocessing 1.1.2
libclang 16.0.6
lit 16.0.6
Markdown 3.4.4
MarkupSafe 2.1.3
matplotlib-inline 0.1.6
mistune 3.0.1
mpmath 1.3.0
multidict 6.0.4
multiprocess 0.70.15
nbclient 0.8.0
nbconvert 7.7.3
nbformat 5.9.2
nest-asyncio 1.5.7
networkx 3.1
notebook 7.0.2
notebook-shim 0.2.3
numpy 1.24.3
nvidia-cublas-cu11 11.10.3.66
nvidia-cuda-cupti-cu11 11.7.101
nvidia-cuda-nvrtc-cu11 11.7.99
nvidia-cuda-runtime-cu11 11.7.99
nvidia-cudnn-cu11 8.5.0.96
nvidia-cufft-cu11 10.9.0.58
nvidia-curand-cu11 10.2.10.91
nvidia-cusolver-cu11 11.4.0.1
nvidia-cusparse-cu11 11.7.4.91
nvidia-nccl-cu11 2.14.3
nvidia-nvtx-cu11 11.7.91
oauthlib 3.2.2
opt-einsum 3.3.0
overrides 7.4.0
packaging 23.1
pandas 2.0.3
pandocfilters 1.5.0
parso 0.8.3
peft 0.4.0
pexpect 4.8.0
pickleshare 0.7.5
pip 20.0.2
pip-autoremove 0.10.0
pkg-resources 0.0.0
pkgutil-resolve-name 1.3.10
platformdirs 3.10.0
prometheus-client 0.17.1
prompt-toolkit 3.0.39
protobuf 3.19.6
psutil 5.9.5
ptyprocess 0.7.0
pure-eval 0.2.2
pyarrow 12.0.1
pyasn1 0.5.0
pyasn1-modules 0.3.0
pycparser 2.21
Pygments 2.16.1
pyspark 3.4.1
python-dateutil 2.8.2
python-json-logger 2.0.7
python-version 0.0.2
pytz 2023.3
PyYAML 6.0.1
pyzmq 25.1.0
qtconsole 5.4.3
QtPy 2.3.1
referencing 0.30.2
regex 2023.6.3
requests 2.31.0
requests-oauthlib 1.3.1
rfc3339-validator 0.1.4
rfc3986-validator 0.1.1
rpds-py 0.9.2
rsa 4.9
safetensors 0.3.1
scipy 1.10.1
Send2Trash 1.8.2
setuptools 44.0.0
six 1.16.0
sniffio 1.3.0
soupsieve 2.4.1
stack-data 0.6.2
sympy 1.12
tensorboard 2.10.1
tensorboard-data-server 0.6.1
tensorboard-plugin-wit 1.8.1
tensorboardX 2.6.2
tensorflow-estimator 2.10.0
tensorflow-io-gcs-filesystem 0.33.0
termcolor 2.3.0
terminado 0.17.1
tinycss2 1.2.1
tokenizers 0.13.3
tomli 2.0.1
torch 2.0.1
tornado 6.3.2
tqdm 4.65.0
traitlets 5.9.0
transformers 4.31.0
triton 2.0.0
trl 0.4.7
typing-extensions 4.5.0
tzdata 2023.3
urllib3 1.26.16
wcwidth 0.2.6
webencodings 0.5.1
websocket-client 1.6.1
Werkzeug 2.3.6
wheel 0.34.2
widgetsnbextension 4.0.8
wrapt 1.15.0
xxhash 3.3.0
yarl 1.9.2
zipp 3.16.2
Script:
```python
import os
import torch
from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    HfArgumentParser,
    TrainingArguments,
    pipeline,
    logging,
)
from peft import LoraConfig, PeftModel
from trl import SFTTrainer

# The model that you want to train from the Hugging Face hub
model_name = "NousResearch/Llama-2-7b-chat-hf"

# The instruction dataset to use
dataset_name = "mlabonne/guanaco-llama2-1k"

# Fine-tuned model name
new_model = "llama-2-7b-miniguanaco"

################################################################################
# QLoRA parameters
################################################################################

# LoRA attention dimension
lora_r = 64

# Alpha parameter for LoRA scaling
lora_alpha = 16

# Dropout probability for LoRA layers
lora_dropout = 0.1

################################################################################
# bitsandbytes parameters
################################################################################

# Activate 4-bit precision base model loading
use_4bit = True

# Compute dtype for 4-bit base models
bnb_4bit_compute_dtype = "float16"

# Quantization type (fp4 or nf4)
bnb_4bit_quant_type = "nf4"

# Activate nested quantization for 4-bit base models (double quantization)
use_nested_quant = False

################################################################################
# TrainingArguments parameters
################################################################################

# Output directory where the model predictions and checkpoints will be stored
output_dir = "./results"

# Number of training epochs
num_train_epochs = 1

# Enable fp16/bf16 training (set bf16 to True with an A100)
fp16 = False
bf16 = False

# Batch size per GPU for training
per_device_train_batch_size = 4

# Batch size per GPU for evaluation
per_device_eval_batch_size = 4

# Number of update steps to accumulate the gradients for
gradient_accumulation_steps = 1

# Enable gradient checkpointing
gradient_checkpointing = True

# Maximum gradient norm (gradient clipping)
max_grad_norm = 0.3

# Initial learning rate (AdamW optimizer)
learning_rate = 2e-4

# Weight decay to apply to all layers except bias/LayerNorm weights
weight_decay = 0.001

# Optimizer to use
optim = "paged_adamw_32bit"

# Learning rate schedule
lr_scheduler_type = "cosine"

# Number of training steps (overrides num_train_epochs)
max_steps = -1

# Ratio of steps for a linear warmup (from 0 to learning rate)
warmup_ratio = 0.03

# Group sequences into batches with the same length
# (saves memory and speeds up training considerably)
group_by_length = True

# Save checkpoint every X update steps
save_steps = 0

# Log every X update steps
logging_steps = 25

################################################################################
# SFT parameters
################################################################################

# Maximum sequence length to use
max_seq_length = None

# Pack multiple short examples in the same input sequence to increase efficiency
packing = False

# Load the entire model on GPU 0
device_map = {"": 0}

# Load dataset (you can process it here)
dataset = load_dataset(dataset_name, split="train")

# Load tokenizer and model with QLoRA configuration
compute_dtype = getattr(torch, bnb_4bit_compute_dtype)

bnb_config = BitsAndBytesConfig(
    load_in_4bit=use_4bit,
    bnb_4bit_quant_type=bnb_4bit_quant_type,
    bnb_4bit_compute_dtype=compute_dtype,
    bnb_4bit_use_double_quant=use_nested_quant,
)

# Check GPU compatibility with bfloat16
if compute_dtype == torch.float16 and use_4bit:
    major, _ = torch.cuda.get_device_capability()
    if major >= 8:
        print("=" * 80)
        print("Your GPU supports bfloat16: accelerate training with bf16=True")
        print("=" * 80)

# Load base model
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map=device_map
)
model.config.use_cache = False
model.config.pretraining_tp = 1

# Load LLaMA tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"  # Fix weird overflow issue with fp16 training

# Load LoRA configuration
peft_config = LoraConfig(
    lora_alpha=lora_alpha,
    lora_dropout=lora_dropout,
    r=lora_r,
    bias="none",
    task_type="CAUSAL_LM",
)

# Set training parameters
training_arguments = TrainingArguments(
    output_dir=output_dir,
    num_train_epochs=num_train_epochs,
    per_device_train_batch_size=per_device_train_batch_size,
    gradient_accumulation_steps=gradient_accumulation_steps,
    optim=optim,
    save_steps=save_steps,
    logging_steps=logging_steps,
    learning_rate=learning_rate,
    weight_decay=weight_decay,
    fp16=fp16,
    bf16=bf16,
    max_grad_norm=max_grad_norm,
    max_steps=max_steps,
    warmup_ratio=warmup_ratio,
    group_by_length=group_by_length,
    lr_scheduler_type=lr_scheduler_type,
    report_to="tensorboard"
)

# Set supervised fine-tuning parameters
trainer = SFTTrainer(
    model=model,
    train_dataset=dataset,
    peft_config=peft_config,
    dataset_text_field="text",
    max_seq_length=max_seq_length,
    tokenizer=tokenizer,
    args=training_arguments,
    packing=packing,
)

# Train model
trainer.train()

# Save trained model
trainer.model.save_pretrained(new_model)
```
Error (raised at `trainer.train()`):
- You're using a LlamaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.
- Error operation not supported at line 351 in file /mmfs1/gscratch/zlab/timdettmers/git/bitsandbytes/csrc/pythonInterface.c
- /arrow/cpp/src/arrow/filesystem/s3fs.cc:2598: arrow::fs::FinalizeS3 was not called even though S3 was initialized. This could lead to a segmentation fault at exit
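
The `Error operation not supported` line points inside the bitsandbytes CUDA bindings rather than into the Python training loop, and the script selects the paged optimizer (`optim = "paged_adamw_32bit"`), which goes through bitsandbytes' paged-memory code path. As a hedged first experiment (an assumption, not a confirmed diagnosis), it may be worth checking bitsandbytes' own CUDA diagnostics (`python -m bitsandbytes`) and seeing whether the crash disappears when a plain PyTorch AdamW is used instead:

```python
from transformers import TrainingArguments

# Experiment: swap the paged bitsandbytes optimizer for vanilla AdamW to test
# whether the failure is tied to the paged-memory code path. All other values
# mirror the script above.
training_arguments = TrainingArguments(
    output_dir="./results",
    num_train_epochs=1,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=1,
    optim="adamw_torch",  # instead of "paged_adamw_32bit"
    learning_rate=2e-4,
    weight_decay=0.001,
    fp16=False,
    bf16=False,
    max_grad_norm=0.3,
    warmup_ratio=0.03,
    group_by_length=True,
    lr_scheduler_type="cosine",
    report_to="tensorboard",
)
```

If training completes with `adamw_torch`, the problem is narrowed down to the paged optimizer on this particular node; if it still fails, the 4-bit quantized model path is the more likely suspect.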