Do different models require different methods to save in GGUF format?

This commit is contained in:
haotian 2025-07-18 09:42:01 +08:00
parent ff312950d1
commit f213a68bc6
5 changed files with 42 additions and 14 deletions
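
Judging from the diffs below, the answer appears to be yes, at least with Unsloth: the abandoned DeepSeek-V2-Lite export (left commented out in the training script) had the tokenizer argument commented away, while the working Qwen3 export passes the tokenizer positionally. Unsloth delegates GGUF conversion to llama.cpp, so the viable export options also depend on llama.cpp supporting the target architecture. A minimal sketch of the two call shapes that appear in this commit:

# earlier DeepSeek-V2-Lite attempt, tokenizer commented out (see the second file below)
# model.save_pretrained_gguf("DeepSeek-V2-Lite")
# working Qwen3 export, tokenizer passed positionally
model.save_pretrained_gguf("Qwen3-14B-unsloth-bnb-4bit", tokenizer)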

View File

@@ -84,6 +84,8 @@ trainer = SFTTrainer(
trainer.train()
messages = [
{"role" : "user", "content" : "请介绍一下昊天"}
]
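
As an aside, a quick sanity check with these messages would follow the usual Unsloth/transformers chat-template flow; a sketch, assuming model and tokenizer are the objects loaded earlier in this script:

FastLanguageModel.for_inference(model)  # switch the Unsloth model into inference mode
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(input_ids=inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))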
@@ -110,3 +112,7 @@ model.save_pretrained_gguf(
# quantization_method="q4_k_m",  # or "q8_0"; quantization mode -- default q8_0, options: "f16", "q4_k_m", "q8_0", "q5_k_m"
+maximum_memory_usage=0.7  # cap GPU memory usage at 70% of total capacity
)
+# print("*"*150, " Modelfile")
+# print(tokenizer._ollama_modelfile)
+# print("*"*150)

View File

@@ -1,8 +1,8 @@
from datasets import load_dataset
from unsloth import FastLanguageModel
import torch
-# import os
-# os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
+import os
+os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
# load the jsonl file
dataset = load_dataset("json", data_files="dataset/test_dataset.jsonl", split="train")
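
The JSONL schema is not shown in this commit; for SFTTrainer a common shape is one JSON object per line carrying a single text field (the field name below is an assumption, not taken from the repo):

# dataset/test_dataset.jsonl -- one hypothetical record per line:
# {"text": "<|im_start|>user\n请介绍一下昊天<|im_end|>\n<|im_start|>assistant\n...<|im_end|>"}
print(dataset[0])  # inspect which columns the trainer will actually consume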
@@ -30,8 +30,9 @@ quant_cfg = BitsAndBytesConfig(
)
model, tokenizer = FastLanguageModel.from_pretrained(
"deepseek-ai/DeepSeek-V2-Lite",
max_seq_length = 1024,
# "deepseek-ai/DeepSeek-V2-Lite",
"unsloth/Qwen3-14B-unsloth-bnb-4bit",
max_seq_length = 2048,
load_in_4bit=False,
load_in_8bit=True,
quantization_config=quant_cfg,
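
quant_cfg is constructed just above this hunk and its fields are not visible in the diff; an 8-bit config consistent with load_in_8bit=True might look like the sketch below (a guess at the shape, not the author's exact settings):

from transformers import BitsAndBytesConfig
quant_cfg = BitsAndBytesConfig(
    load_in_8bit=True,       # matches the load_in_8bit=True flag passed to from_pretrained
    llm_int8_threshold=6.0,  # bitsandbytes default outlier threshold
)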
@@ -44,10 +45,10 @@ model, tokenizer = FastLanguageModel.from_pretrained(
model = FastLanguageModel.get_peft_model(
model,
-r = 8, # Choose any number > 0! Suggested 8, 16, 32, 64, 128
+r = 16, # Choose any number > 0! Suggested 8, 16, 32, 64, 128
target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
"gate_proj", "up_proj", "down_proj",],
-lora_alpha = 8, # Best to choose alpha = rank or rank*2
+lora_alpha = 16, # Best to choose alpha = rank or rank*2
lora_dropout = 0, # Supports any, but = 0 is optimized
bias = "none", # Supports any, but = "none" is optimized
# [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
@@ -73,7 +74,7 @@ trainer = SFTTrainer(
gradient_accumulation_steps=8,
warmup_steps = 5,
# num_train_epochs = 1, # Set this for 1 full training run.
-max_steps = 30,
+max_steps = 5,
learning_rate = 2e-4, # Reduce to 2e-5 for long training runs
logging_steps = 1,
optim = "adamw_8bit",
@@ -108,10 +109,26 @@ trainer.train()
+# model.cpu()
+# model.save_pretrained_gguf(
+# "DeepSeek-V2-Lite",
+# # tokenizer = tokenizer,
+# # quantization_method="q4_k_m",  # or "q8_0"; quantization mode -- default q8_0, options: "f16", "q4_k_m", "q8_0", "q5_k_m"
+# # quantization_type="q4_k_m"
+# # maximum_memory_usage=0.7  # cap GPU memory usage at 70% of total capacity
+# )
+# print("*"*150, " Modelfile")
+# print(tokenizer._ollama_modelfile)
+# print("*"*150)
+# model.save_pretrained_merged("merged_model", maximum_memory_usage=0.7)
+# tokenizer.save_pretrained("merged_model")
model.save_pretrained_gguf(
-"DeepSeek-V2-Lite",
+"Qwen3-14B-unsloth-bnb-4bit",
tokenizer,
# quantization_method="q4_k_m",  # or "q8_0"; quantization mode -- default q8_0, options: "f16", "q4_k_m", "q8_0", "q5_k_m"
# quantization_type="q4_k_m"
-# maximum_memory_usage=0.7  # cap GPU memory usage at 70% of total capacity
-)
+maximum_memory_usage=0.7  # cap GPU memory usage at 70% of total capacity
+)
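
The commented-out save_pretrained_merged lines above point at the alternative export route: merge the LoRA adapters into the base weights, save a regular Hugging Face checkpoint, and convert that to GGUF separately with llama.cpp. A sketch using Unsloth's documented save_method values:

model.save_pretrained_merged("merged_model", tokenizer, save_method="merged_16bit")
# then convert merged_model/ to GGUF with llama.cpp's conversion script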

View File

@@ -1,4 +1,4 @@
-FROM ./unsloth.Q8_0.gguf
+FROM ./Qwen3-8B/unsloth.Q4_K_M.gguf
TEMPLATE """{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
@@ -6,3 +6,8 @@ TEMPLATE """{{ if .System }}<|im_start|>system
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
"""
PARAMETER stop "<|im_start|>"
PARAMETER stop "<|im_end|>"
PARAMETER temperature 0.7
PARAMETER top_p 0.9
PARAMETER top_k 40
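
The TEMPLATE block is ChatML, which matches Qwen's chat format, and the stop parameters keep generation from running past a turn boundary. With the exported GGUF sitting at the path in the FROM line, the model can be registered and tested locally with ollama create qwen3-finetune -f Modelfile followed by ollama run qwen3-finetune (the model name is arbitrary).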

View File

@@ -130,7 +130,7 @@ class UnslothAlignPropConfig(AlignPropConfig):
)
def __init__(
self,
-exp_name = '003微调deepseek',  # i.e. "003 fine-tune DeepSeek"
+exp_name = '003加载自己的数据集微调',  # i.e. "003 fine-tune on one's own dataset"
run_name = '',
seed = 3407,
log_with = None,

View File

@@ -146,7 +146,7 @@ class UnslothDDPOConfig(DDPOConfig):
)
def __init__(
self,
-exp_name = '003微调deepseek',  # i.e. "003 fine-tune DeepSeek"
+exp_name = '003加载自己的数据集微调',  # i.e. "003 fine-tune on one's own dataset"
run_name = '',
seed = 3407,
log_with = None,