Update app.py
Browse files
app.py
CHANGED
|
@@ -82,7 +82,7 @@ MODEL_NAME = "gpt-3.5-turbo-16k"

 82   repo_id = "meta-llama/Llama-2-13b-chat-hf"
 83   #repo_id = "HuggingFaceH4/zephyr-7b-alpha"
 84   #repo_id = "meta-llama/Llama-2-70b-chat-hf"
 85 - #repo_id = "tiiuae/falcon-180B-chat"
 86   #repo_id = "Vicuna-33b"
 87
 88

@@ -258,9 +258,10 @@ def invoke (prompt, history, openai_api_key, rag_option, temperature=0.9, max_new_tokens=...)

258   #LLM auswählen (OpenAI oder HF)
259   ###########################
260   #Anfrage an OpenAI
261 - llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature = 0)
262   #oder an Hugging Face
263 - #llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 64})
264
265   #zusätzliche Dokumenten Splits aus DB zum Prompt hinzufügen (aus VektorDB - Chroma oder Mongo DB)
266   if (rag_option == "Chroma"):
|
|
|
|
 82   repo_id = "meta-llama/Llama-2-13b-chat-hf"
 83   #repo_id = "HuggingFaceH4/zephyr-7b-alpha"
 84   #repo_id = "meta-llama/Llama-2-70b-chat-hf"
 85 + #repo_id = "tiiuae/falcon-180B-chat"
 86   #repo_id = "Vicuna-33b"
 87
 88

258   #LLM auswählen (OpenAI oder HF)
259   ###########################
260   #Anfrage an OpenAI
261 + #llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature = 0)
262   #oder an Hugging Face
263 + #llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 64})
264 + llm = HuggingFaceHub(API_URL = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
265
266   #zusätzliche Dokumenten Splits aus DB zum Prompt hinzufügen (aus VektorDB - Chroma oder Mongo DB)
267   if (rag_option == "Chroma"):