finetune_meta_llama3_8B_instruct.py
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig
from trl import SFTTrainer
import torch
from peft import LoraConfig, get_peft_model
from datasets import load_dataset

# Sanity checks: confirm the PyTorch build and that a CUDA device is visible.
print("Torch version:", torch.__version__)
print("Is CUDA enabled?", torch.cuda.is_available())
# Load the Llama 3 8B Instruct tokenizer from the Hugging Face Hub.
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# Llama 3 ships without a pad token; add one so padded batches are possible.
if tokenizer.pad_token is None:
    tokenizer.add_special_tokens({'pad_token': '[PAD]'})
text = "Capital of USA is"
device = "cuda:0"
inputs = tokenizer(text, return_tensors="pt").to(device)
# 4-bit NF4 quantization (QLoRA-style) with double quantization and bfloat16 compute.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    load_in_8bit=False
)
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config)
# A new [PAD] token was added to the tokenizer above, so the embedding matrix
# must be resized to match the enlarged vocabulary before training.
model.resize_token_embeddings(len(tokenizer))
# LoRA adapter configuration covering the attention and MLP projections (plus
# lm_head), with rank 16 and scaling alpha 32.
config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj", "lm_head"]
)
model = get_peft_model(model, config)
# Quick generation pass to confirm the quantized, LoRA-wrapped model still runs.
outputs = model.generate(**inputs, max_new_tokens=30)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
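# --- Fine-tuning step (sketch) -----------------------------------------------
# SFTTrainer, TrainingArguments and load_dataset are imported above but never
# used, so as written the script only runs inference and pushes an untrained
# adapter. Below is a minimal sketch of how the supervised fine-tuning step
# could be wired up. The dataset name, output directory and hyperparameters are
# illustrative placeholders, and the keyword arguments assume an older trl
# release in which SFTTrainer accepts dataset_text_field, max_seq_length and
# tokenizer directly (newer releases move these settings into SFTConfig).
dataset = load_dataset("timdettmers/openassistant-guanaco", split="train")  # example dataset with a "text" column; swap in your own data

training_args = TrainingArguments(
    output_dir="llama3-8b-instruct-lora",   # placeholder output directory
    per_device_train_batch_size=1,
    gradient_accumulation_steps=4,
    num_train_epochs=1,
    learning_rate=2e-4,
    bf16=True,
    logging_steps=10,
)

trainer = SFTTrainer(
    model=model,                  # already wrapped with get_peft_model above
    args=training_args,
    train_dataset=dataset,
    dataset_text_field="text",    # column holding the raw training text
    max_seq_length=512,
    tokenizer=tokenizer,
)
trainer.train()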
# Push the LoRA adapter to the Hugging Face Hub; replace "HF-Access-Key" with a
# real access token.
model.push_to_hub("meetrais/Meta-Llama-3-8B-Instruct-NIM-LORA",
                  token="HF-Access-Key",
                  commit_message="basic training",
                  private=True)