# unsloth/003微调deepseek.py
from datasets import load_dataset
from unsloth import FastLanguageModel
import torch
import os
# Route Hugging Face downloads through the hf-mirror.com mirror
# (useful where huggingface.co is unreachable)
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
# Load the JSONL file
dataset = load_dataset("json", data_files="dataset/test_dataset.jsonl", split="train")
# Convert each record into a ChatML-formatted string field.
# `example` corresponds to one line of the JSONL file.
def to_chatml(example):
    messages = example["messages"]
    chat = ""
    for m in messages:
        # Wrap each message in ChatML role markers
        chat += f"<|im_start|>{m['role']}\n{m['content']}<|im_end|>\n"
    return {"text": chat.strip()}
# Add the `text` field
dataset = dataset.map(to_chatml)
# print("\n", dataset[0])
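# For reference, to_chatml assumes each JSONL line has the shape below
# (the content shown is illustrative, not taken from the real dataset):
#   {"messages": [{"role": "user", "content": "..."},
#                 {"role": "assistant", "content": "..."}]}
# Quick sanity check that the mapping produced the expected field:
# assert dataset[0]["text"].startswith("<|im_start|>")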
from transformers import BitsAndBytesConfig
quant_cfg = BitsAndBytesConfig(
    load_in_4bit = True,
    bnb_4bit_quant_type = "nf4",
    # Despite the "int8" name, this flag also gates fp32 CPU offload for 4-bit loads
    llm_int8_enable_fp32_cpu_offload = True,
)
model, tokenizer = FastLanguageModel.from_pretrained(
    # "deepseek-ai/DeepSeek-V2-Lite",
    "unsloth/Qwen3-14B-unsloth-bnb-4bit",
    max_seq_length = 2048,
    # The checkpoint is pre-quantized to bnb 4-bit, so load it in 4-bit;
    # load_in_8bit=True would conflict with the 4-bit weights.
    load_in_4bit = True,
    load_in_8bit = False,
    quantization_config = quant_cfg,
    device_map = "auto",
    offload_folder = "offload/",
    trust_remote_code = True,
)
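# Optional sanity checks after loading: get_memory_footprint() and
# hf_device_map are standard transformers/accelerate surface, and show how
# large the quantized model is and where device_map="auto" placed each module.
# print(f"footprint: {model.get_memory_footprint() / 1e9:.2f} GB")
# print(getattr(model, "hf_device_map", {}))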
model = FastLanguageModel.get_peft_model(
    model,
    r = 16, # Choose any number > 0! Suggested 8, 16, 32, 64, 128
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj",],
    lora_alpha = 16,  # Best to choose alpha = rank or rank*2
    lora_dropout = 0, # Supports any, but = 0 is optimized
    bias = "none",    # Supports any, but = "none" is optimized
    # [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
    use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
    random_state = 3407,
    use_rslora = False,  # We support rank stabilized LoRA
    loftq_config = None, # And LoftQ
)
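# The returned model is a PEFT wrapper, so it can report how many parameters
# the LoRA adapters actually train; with r=16 on a 14B model this should be
# well under 1% of the total.
# model.print_trainable_parameters()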
from trl import SFTTrainer, SFTConfig
trainer = SFTTrainer(
    model = model,
    tokenizer = tokenizer,
    train_dataset = dataset,
    eval_dataset = None, # Can set up evaluation!
    args = SFTConfig(
        dataset_text_field = "text", # Must match the `text` field created on the dataset above
        # per_device_train_batch_size = 2,
        # gradient_accumulation_steps = 4, # Use GA to mimic batch size!
        per_device_train_batch_size = 1,
        gradient_accumulation_steps = 8, # Effective batch size = 1 * 8
        warmup_steps = 5,
        # num_train_epochs = 1, # Set this for 1 full training run.
        max_steps = 5,
        learning_rate = 2e-4, # Reduce to 2e-5 for long training runs
        logging_steps = 1,
        optim = "adamw_8bit",
        weight_decay = 0.01,
        lr_scheduler_type = "linear",
        seed = 3407,
        report_to = "none", # Use this for WandB etc
    ),
)
trainer.train()
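# trainer.train() returns a TrainOutput whose .metrics dict holds the final
# train_loss and runtime. Saving just the LoRA adapters is a lightweight
# alternative to a full merge (the directory name below is illustrative):
# model.save_pretrained("lora_model")
# tokenizer.save_pretrained("lora_model")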
# messages = [
#     {"role" : "user", "content" : "请介绍一下昊天"} # "Please introduce Haotian (昊天)"
# ]
# text = tokenizer.apply_chat_template(
#     messages,
#     tokenize = False,
#     add_generation_prompt = True, # Must add for generation
#     enable_thinking = False,      # Disable thinking
# )
# from transformers import TextStreamer
# _ = model.generate(
#     **tokenizer(text, return_tensors = "pt").to("cuda"),
#     max_new_tokens = 256, # Increase for longer outputs!
#     temperature = 0.7, top_p = 0.8, top_k = 20, # For non thinking
#     streamer = TextStreamer(tokenizer, skip_prompt = True),
# )
# model.cpu()
# model.save_pretrained_gguf(
#     "DeepSeek-V2-Lite",
#     # tokenizer = tokenizer,
#     # quantization_method = "q4_k_m", # Quantization mode -- default "q8_0"; options: "f16", "q4_k_m", "q8_0", "q5_k_m"
#     # quantization_type = "q4_k_m"
#     # maximum_memory_usage = 0.7 # Cap GPU memory usage at 70% of total capacity
# )
# print("*" * 150, " Modelfile")
# print(tokenizer._ollama_modelfile)
# print("*" * 150)
# model.save_pretrained_merged("merged_model", maximum_memory_usage=0.7)
# tokenizer.save_pretrained("merged_model")
model.save_pretrained_gguf(
    "Qwen3-14B-unsloth-bnb-4bit",
    tokenizer,
    # quantization_method = "q4_k_m", # Quantization mode -- default "q8_0"; options: "f16", "q4_k_m", "q8_0", "q5_k_m"
    maximum_memory_usage = 0.7, # Cap GPU memory usage at 70% of total capacity
)
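# After the GGUF export, unsloth exposes an Ollama Modelfile template on the
# tokenizer (see the commented print above). A typical follow-up, with an
# illustrative model name, is:
#   with open("Modelfile", "w") as f:
#       f.write(tokenizer._ollama_modelfile)
#   # then in a shell: ollama create qwen3-ft -f Modelfile && ollama run qwen3-ft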