inaamullah-younas commited on
Commit
b9d32b7
·
verified ·
1 Parent(s): f977aef

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -0
app.py CHANGED
@@ -31,9 +31,25 @@ torch.backends.cudnn.benchmark = True
31
  llm_model = torch.compile(llm_model)
32
 
33
  # 🔹 Initialize ChromaDB
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  chroma_client = chromadb.PersistentClient(path="./chroma_db")
35
  collection = chroma_client.get_or_create_collection(name="hepB_knowledge")
36
 
 
 
 
37
  # 🔹 Function to Generate LLM Responses
38
  def generate_humanized_response(query, retrieved_text):
39
  """Passes retrieved chunks through Mistral-7B to improve readability."""
 
31
  llm_model = torch.compile(llm_model)
32
 
33
  # ๐Ÿ”น Initialize ChromaDB
34
+ import os
35
+ import zipfile
36
+
37
+ # ๐Ÿ”น Unzip ChromaDB database if not extracted
38
+ if not os.path.exists("./chroma_db"):
39
+ with zipfile.ZipFile("chroma_db.zip", 'r') as zip_ref:
40
+ zip_ref.extractall("./")
41
+
42
+ print("โœ… ChromaDB database loaded!")
43
+
44
import chromadb

# 🔹 Load ChromaDB from the local storage extracted on startup and
# fetch (or create) the collection holding the hepatitis-B knowledge
# chunks used for retrieval.
chroma_client = chromadb.PersistentClient(path="./chroma_db")
collection = chroma_client.get_or_create_collection(name="hepB_knowledge")

print("✅ ChromaDB initialized!")
51
+
52
+
53
  # 🔹 Function to Generate LLM Responses
54
  def generate_humanized_response(query, retrieved_text):
55
  """Passes retrieved chunks through Mistral-7B to improve readability."""