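# Gradio Space that loads a 4-bit-quantized Qwen2.5-VL base model, attaches a
# LoRA adapter fine-tuned for thinking/function calling, and exposes a single
# "Are you sentient?" generation demo.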
import os

import gradio as gr
import huggingface_hub
import spaces
import torch
from datasets import load_dataset
from peft import PeftConfig, PeftModel
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    Qwen2_5_VLForConditionalGeneration,
)
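
# Log in so the (possibly gated or private) adapter repo can be downloaded;
# HF_TOKEN is expected to be set as a Space secret.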
huggingface_hub.login(os.getenv("HF_TOKEN"))

peft_model_id = "debisoft/Qwen2.5-VL-3B-Instruct-thinking-function_calling-V0"
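
# Quantize the base model to 4-bit NF4 with bf16 compute and double
# quantization, so the 3B VL model fits in a small GPU memory footprint.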
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)
# Devices used by sentience_check() below: the model is moved to the GPU for
# generation and back to the CPU afterwards. (The original left these names
# undefined.)
cuda_device = torch.device("cuda")
cpu_device = torch.device("cpu")
config = PeftConfig.from_pretrained(peft_model_id)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    # AutoModelForCausalLM.from_pretrained would be the text-only alternative.
    config.base_model_name_or_path,
    quantization_config=bnb_config,
    device_map="auto",
)
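
# The adapter repo ships its own tokenizer (the fine-tune presumably added
# tokens), so load it from there and resize the embedding matrix to match
# its vocabulary before attaching the adapter.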
tokenizer = AutoTokenizer.from_pretrained(peft_model_id)
model.resize_token_embeddings(len(tokenizer))

model = PeftModel.from_pretrained(
    model,
    peft_model_id,
    # offload_folder="offload/",
)
model.to(torch.bfloat16)
model.eval()
# Earlier text-only experiment, kept for reference:
# tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Qwen-7B")
# model = AutoModelForCausalLM.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Qwen-7B")
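
# On ZeroGPU Spaces, functions that use the GPU are wrapped with @spaces.GPU
# so a device is allocated for the call. Assumed here because `spaces` is
# imported; drop the decorator on a dedicated-GPU Space.
@spaces.GPU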
def sentience_check():
    model.to(cuda_device)
    inputs = tokenizer("Are you sentient?", return_tensors="pt").to(cuda_device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs, max_new_tokens=128, pad_token_id=tokenizer.eos_token_id
        )

    # Move the model back to the CPU to free GPU memory between calls.
    model.to(cpu_device)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
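
# A zero-input Interface: submitting simply runs sentience_check() and shows
# the decoded completion in a text box.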
demo = gr.Interface(fn=sentience_check, inputs=None, outputs=gr.Text())
demo.launch()