Update app.py
Browse files
app.py
CHANGED
|
@@ -109,6 +109,11 @@ MODEL_NAME_OAI_ZEICHNEN = "dall-e-3"
|
|
| 109 |
#Alternativ zeichnen: Stable Diffusion from HF:
|
| 110 |
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1"
|
| 111 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 112 |
################################################
|
| 113 |
#HF Hub Zugriff ermöglichen
|
| 114 |
###############################################
|
|
@@ -443,11 +448,12 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
|
|
| 443 |
else:
|
| 444 |
#oder an Hugging Face --------------------------
|
| 445 |
print("HF Anfrage.......................")
|
| 446 |
-
model_kwargs={"temperature": 0.5, "max_length":
|
| 447 |
-
llm = HuggingFaceHub(repo_id=repo_id, model_kwargs=model_kwargs)
|
| 448 |
-
#llm = HuggingFaceHub(model=MODEL_NAME_HF, model_kwargs={"temperature": 0.5, "max_length": 128})
|
| 449 |
#llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
|
| 450 |
#llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
|
|
|
|
|
|
|
| 451 |
print("HF")
|
| 452 |
#Prompt an history anhängen und einen Text daraus machen
|
| 453 |
history_text_und_prompt = generate_prompt_with_history(prompt, history)
|
|
|
|
| 109 |
#Alternativ zeichnen: Stable Diffusion from HF:
|
| 110 |
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1"
|
| 111 |
|
| 112 |
+
################################################
|
| 113 |
+
#Inference - Endpoint
|
| 114 |
+
################################################
|
| 115 |
+
ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>"
|
| 116 |
+
|
| 117 |
################################################
|
| 118 |
#HF Hub Zugriff ermöglichen
|
| 119 |
###############################################
|
|
|
|
| 448 |
else:
|
| 449 |
#oder an Hugging Face --------------------------
|
| 450 |
print("HF Anfrage.......................")
|
| 451 |
+
model_kwargs={"temperature": 0.5, "max_length": 1024, "num_return_sequences": 1, "top_k": top_k, "top_p": top_p, "repetition_penalty": repetition_penalty}
|
| 452 |
+
#llm = HuggingFaceHub(repo_id=repo_id, model_kwargs=model_kwargs)
|
|
|
|
| 453 |
#llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
|
| 454 |
#llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
|
| 455 |
+
#Mit Inference Endpoint....
|
| 456 |
+
llm = HuggingFaceEndpoint(endpoint_url=ENDPOINT_URL, task="text-generation",model_kwargs={ "max_new_tokens": 512,"top_k": 50,"temperature": 0.1,"repetition_penalty": 1.03,},)
|
| 457 |
print("HF")
|
| 458 |
#Prompt an history anhängen und einen Text daraus machen
|
| 459 |
history_text_und_prompt = generate_prompt_with_history(prompt, history)
|