import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

BASE_ID = "unsloth/Llama-3.2-3B-Instruct-bnb-4bit"
LORA_PATH = "./lora_model"
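# PeftModel.from_pretrained expects LORA_PATH to contain the saved adapter
# files (adapter_config.json plus the adapter weights), e.g. the output of a
# training-side model.save_pretrained("lora_model").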

def load_model():
    tokenizer = AutoTokenizer.from_pretrained(BASE_ID, use_fast=True)
    # The 4-bit base checkpoint generally needs a GPU; running it this way is
    # only advisable on a GPU Space.
    base_model = AutoModelForCausalLM.from_pretrained(
        BASE_ID,
        device_map="auto",
        torch_dtype=torch.float16,
    )
    # Attach the LoRA adapter on top of the frozen 4-bit base weights.
    model = PeftModel.from_pretrained(base_model, LORA_PATH)
    model.eval()
    return tokenizer, model

tokenizer, model = load_model()
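
# Optional, and not part of the original app: merging the adapter into the
# base model (model = model.merge_and_unload()) removes the PEFT indirection
# at inference time, but merging into a 4-bit quantized base can cost
# precision, so it is left out here.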

def respond(prompt, max_new_tokens=256, temperature=0.7, top_p=0.9):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    out = model.generate(
        **inputs,
        max_new_tokens=int(max_new_tokens),
        do_sample=True,
        temperature=float(temperature),
        top_p=float(top_p),
        eos_token_id=tokenizer.eos_token_id,
    )
    # Note: this decodes the full sequence, so the echoed prompt is included
    # in the returned text.
    return tokenizer.decode(out[0], skip_special_tokens=True)
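
# A minimal chat-formatted variant (a sketch, not part of the original app):
# the base checkpoint is an Instruct model, so wrapping the prompt with the
# tokenizer's chat template usually yields better-behaved replies.
# `respond_chat` is an illustrative name.
def respond_chat(prompt, max_new_tokens=256, temperature=0.7, top_p=0.9):
    messages = [{"role": "user", "content": prompt}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    out = model.generate(
        input_ids,
        max_new_tokens=int(max_new_tokens),
        do_sample=True,
        temperature=float(temperature),
        top_p=float(top_p),
        eos_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens, skipping the templated prompt.
    return tokenizer.decode(out[0, input_ids.shape[-1]:], skip_special_tokens=True)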

demo = gr.Interface(
    fn=respond,
    inputs=[
        gr.Textbox(lines=6, label="Prompt"),
        gr.Slider(1, 1024, value=256, step=1, label="max_new_tokens"),
        gr.Slider(0.0, 2.0, value=0.7, step=0.05, label="temperature"),
        gr.Slider(0.0, 1.0, value=0.9, step=0.01, label="top_p"),
    ],
    outputs=gr.Textbox(lines=12, label="Output"),
    title="Llama-3.2-3B-Instruct (4-bit) + LoRA Adapter",
)

if __name__ == "__main__":
    demo.launch()
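
# On a Hugging Face Space this file would typically ship with a
# requirements.txt roughly like the following (an assumption; the original
# does not show one). bitsandbytes is needed for the bnb-4bit checkpoint and
# accelerate for device_map="auto":
#     torch
#     gradio
#     transformers
#     peft
#     accelerate
#     bitsandbytes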