import os

from dotenv import load_dotenv
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Load environment variables if needed
load_dotenv()
# Use the Qwen2.5-7B-Instruct model from Hugging Face
MODEL_NAME = "Qwen/Qwen2.5-7B-Instruct"
# Initialize tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map="auto",  # or "cpu", "cuda", etc. as appropriate
    trust_remote_code=True,
)

# Create the text-generation pipeline
qwen_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
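# Note (an assumption, not part of the original snippet): on GPUs with
# limited VRAM, loading the weights in half precision can roughly halve
# memory use. A minimal sketch:
#   import torch
#   model = AutoModelForCausalLM.from_pretrained(
#       MODEL_NAME,
#       device_map="auto",
#       torch_dtype=torch.float16,
#       trust_remote_code=True,
#   )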
def generate_response(retrieved_texts, query, max_new_tokens=512):
    """
    Generates a response based on the retrieved texts and query using the Qwen pipeline.

    Args:
        retrieved_texts (list): List of retrieved text strings.
        query (str): The user's query string.
        max_new_tokens (int): Maximum number of tokens for the generated answer.

    Returns:
        str: Generated response.
    """
    # Construct a simple prompt from the retrieved context
    context = "\n".join(retrieved_texts)
    prompt = f"Context:\n{context}\n\nQuestion: {query}\nAnswer:"

    # Generate the text
    result = qwen_pipeline(
        prompt,
        max_new_tokens=max_new_tokens,
        do_sample=True,   # or False if you prefer deterministic output
        temperature=0.7,  # adjust as needed
    )

    # The pipeline's output includes the prompt followed by the completion
    generated_text = result[0]["generated_text"]

    # Optional: isolate the answer portion after the final "Answer:" marker
    if "Answer:" in generated_text:
        answer_part = generated_text.split("Answer:")[-1].strip()
    else:
        answer_part = generated_text

    return answer_part
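
# Example usage: a minimal smoke test. The documents below are made-up
# placeholders (illustrative only); real retrieved_texts would come from
# your retriever.
if __name__ == "__main__":
    sample_docs = [
        "Qwen2.5 is a family of instruction-tuned language models from Alibaba.",
        "The 7B-Instruct variant can be run locally via the transformers library.",
    ]
    print(generate_response(sample_docs, "What is Qwen2.5?", max_new_tokens=128))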