Update app.py
Browse files
app.py
CHANGED
|
@@ -87,7 +87,8 @@ MODEL_NAME_IMAGE = "gpt-4-vision-preview"
|
|
| 87 |
#verfügbare Modelle anzeigen lassen
|
| 88 |
#HuggingFace Repo ID--------------------------------
|
| 89 |
#repo_id = "meta-llama/Llama-2-13b-chat-hf"
|
| 90 |
-
repo_id = "HuggingFaceH4/zephyr-7b-alpha" #das Modell ist echt gut!!! Vom MIT
|
|
|
|
| 91 |
#repo_id = "TheBloke/Yi-34B-Chat-GGUF"
|
| 92 |
#repo_id = "meta-llama/Llama-2-70b-chat-hf"
|
| 93 |
#repo_id = "tiiuae/falcon-40b"
|
|
@@ -443,8 +444,8 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
|
|
| 443 |
#oder an Hugging Face --------------------------
|
| 444 |
print("HF Anfrage.......................")
|
| 445 |
model_kwargs={"temperature": 0.5, "max_length": 512, "num_return_sequences": 1, "top_k": top_k, "top_p": top_p, "repetition_penalty": repetition_penalty}
|
| 446 |
-
|
| 447 |
-
llm = HuggingFaceHub(model=MODEL_NAME_HF, model_kwargs={"temperature": 0.5, "max_length": 128})
|
| 448 |
#llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
|
| 449 |
#llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
|
| 450 |
print("HF")
|
|
|
|
| 87 |
#verfügbare Modelle anzeigen lassen
|
| 88 |
#HuggingFace Repo ID--------------------------------
|
| 89 |
#repo_id = "meta-llama/Llama-2-13b-chat-hf"
|
| 90 |
+
#repo_id = "HuggingFaceH4/zephyr-7b-alpha" #das Modell ist echt gut!!! Vom MIT
|
| 91 |
+
repo_id = "google/gemma-7b"
|
| 92 |
#repo_id = "TheBloke/Yi-34B-Chat-GGUF"
|
| 93 |
#repo_id = "meta-llama/Llama-2-70b-chat-hf"
|
| 94 |
#repo_id = "tiiuae/falcon-40b"
|
|
|
|
| 444 |
#oder an Hugging Face --------------------------
|
| 445 |
print("HF Anfrage.......................")
|
| 446 |
model_kwargs={"temperature": 0.5, "max_length": 512, "num_return_sequences": 1, "top_k": top_k, "top_p": top_p, "repetition_penalty": repetition_penalty}
|
| 447 |
+
llm = HuggingFaceHub(repo_id=repo_id, model_kwargs=model_kwargs)
|
| 448 |
+
#llm = HuggingFaceHub(model=MODEL_NAME_HF, model_kwargs={"temperature": 0.5, "max_length": 128})
|
| 449 |
#llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
|
| 450 |
#llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
|
| 451 |
print("HF")
|