Kalpokoch committed on
Commit a179120 · verified · 1 Parent(s): 85c3f24

Update app/policy_vector_db.py

Files changed (1):
  1. app/policy_vector_db.py  +20 -77
app/policy_vector_db.py CHANGED
@@ -13,18 +13,18 @@ logger = logging.getLogger(__name__)
 
 class PolicyVectorDB:
     """
-    Manages the connection, population, and querying of a ChromaDB vector database
+    Manages the connection, population, and querying of a ChromaDB vector database
     for policy documents.
     """
-
     def __init__(self, persist_directory: str, top_k_default: int = 5, relevance_threshold: float = 0.5):
         self.persist_directory = persist_directory
         self.client = chromadb.PersistentClient(path=persist_directory, settings=Settings(allow_reset=True))
         self.collection_name = "neepco_dop_policies"
 
         # Using a powerful open-source embedding model.
+        # Change 'cpu' to 'cuda' if a GPU is available for significantly faster embedding.
         logger.info("Loading embedding model 'BAAI/bge-large-en-v1.5'. This may take a moment...")
-        self.embedding_model = SentenceTransformer('BAAI/bge-large-en-v1.5', device='cpu')
+        self.embedding_model = SentenceTransformer('BAAI/bge-large-en-v1.5', device='cpu')
         logger.info("Embedding model loaded successfully.")
 
         self.collection = None  # Initialize collection as None for lazy loading
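The new comment points at the hard-coded device string. As a side note, the device could also be chosen at runtime; a minimal sketch, assuming torch is importable (sentence-transformers already depends on it):

import torch
from sentence_transformers import SentenceTransformer

# Sketch only: pick the device automatically instead of editing the string by hand.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
embedding_model = SentenceTransformer('BAAI/bge-large-en-v1.5', device=device)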
@@ -44,22 +44,13 @@ class PolicyVectorDB:
 
     def _flatten_metadata(self, metadata: Dict) -> Dict:
         """Ensures all metadata values are strings, as required by some ChromaDB versions."""
-        def flatten_value(value):
-            if isinstance(value, dict):
-                return str(value)
-            elif isinstance(value, list):
-                return str(value)
-            else:
-                return str(value)
-
-        return {key: flatten_value(value) for key, value in metadata.items()}
+        return {key: str(value) for key, value in metadata.items()}
 
     def add_chunks(self, chunks: List[Dict]):
         """
         Adds a list of chunks to the vector database, skipping any that already exist.
         """
         collection = self._get_collection()
-
         if not chunks:
             logger.info("No chunks provided to add.")
             return
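Note on the _flatten_metadata change: the removed flatten_value helper returned str(value) in every branch (dict, list, and fallback alike), so the single dict comprehension is behaviorally identical. A hypothetical input shows the resulting shape:

# Hypothetical metadata record; every value is stringified, lists and dicts included.
meta = {"section": "4.2", "tags": ["delegation", "finance"], "page": 17}
flattened = {key: str(value) for key, value in meta.items()}
# -> {'section': '4.2', 'tags': "['delegation', 'finance']", 'page': '17'}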
@@ -67,7 +58,6 @@ class PolicyVectorDB:
         chunks_with_ids = [c for c in chunks if c.get('id')]
         if len(chunks) != len(chunks_with_ids):
             logger.warning(f"Skipped {len(chunks) - len(chunks_with_ids)} chunks that were missing an 'id'.")
-
         if not chunks_with_ids:
             return
 
@@ -77,23 +67,24 @@ class PolicyVectorDB:
         if not new_chunks:
             logger.info("All provided chunks already exist in the database. No new data to add.")
             return
-
+
         logger.info(f"Adding {len(new_chunks)} new chunks to the vector database...")
-
+
         # Process in batches for efficiency
         batch_size = 32  # Reduced batch size for potentially large embeddings
         for i in range(0, len(new_chunks), batch_size):
             batch = new_chunks[i:i + batch_size]
+
             ids = [str(chunk['id']) for chunk in batch]
             texts = [chunk['text'] for chunk in batch]
             metadatas = [self._flatten_metadata(chunk.get('metadata', {})) for chunk in batch]
-
+
             # For BGE models, it's recommended not to add instructions to the document embeddings
             embeddings = self.embedding_model.encode(texts, normalize_embeddings=True, show_progress_bar=False).tolist()
-
+
             collection.add(ids=ids, embeddings=embeddings, documents=texts, metadatas=metadatas)
             logger.info(f"Added batch {i//batch_size + 1}/{(len(new_chunks) + batch_size - 1) // batch_size}")
-
+
         logger.info(f"Finished adding {len(new_chunks)} chunks.")
 
     def search(self, query_text: str, top_k: int = None) -> List[Dict]:
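new_chunks is computed just above this hunk and is not part of the diff. One common way to build it with ChromaDB is to ask the collection which candidate ids already exist; the sketch below is an assumption about that surrounding code, not a quote of it:

# Sketch: keep only chunks whose ids are not already stored in the collection.
candidate_ids = [str(c['id']) for c in chunks_with_ids]
existing_ids = set(collection.get(ids=candidate_ids)['ids'])
new_chunks = [c for c in chunks_with_ids if str(c['id']) not in existing_ids]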
@@ -117,69 +108,23 @@ class PolicyVectorDB:
             n_results=k * 2,  # Retrieve more to filter by threshold
             include=["documents", "metadatas", "distances"]
         )
-
-        search_results = []
-        if results and results.get('documents') and results['documents']:
-            for i, doc in enumerate(results['documents'][0]):  # Access first sublist
-                # The distance for normalized embeddings is often interpreted as 1 - cosine_similarity
-                relevance_score = 1 - results['distances'][0][i]  # ✅ Fixed: Access distances correctly
-                if relevance_score >= self.relevance_threshold:
-                    search_results.append({
-                        'text': doc,
-                        'metadata': results['metadatas'][0][i],  # ✅ Fixed: Access metadatas correctly
-                        'relevance_score': relevance_score
-                    })
-
-        # Sort by relevance score and return the top_k results
-        return sorted(search_results, key=lambda x: x['relevance_score'], reverse=True)[:k]
-
-    def search_with_filters(self, query_text: str, top_k: int = None,
-                            section_filter: str = None, chunk_type_filter: str = None) -> List[Dict]:
-        """Enhanced search with metadata filtering capability."""
-        collection = self._get_collection()
-
-        instructed_query = f"Represent this sentence for searching relevant passages: {query_text}"
-        query_embedding = self.embedding_model.encode([instructed_query], normalize_embeddings=True).tolist()
-
-        k = top_k if top_k is not None else self.top_k_default
-
-        # Build where clause for filtering
-        where_clause = {}
-        if section_filter:
-            where_clause["section"] = section_filter
-        if chunk_type_filter:
-            where_clause["chunk_type"] = chunk_type_filter
-
-        try:
-            results = collection.query(
-                query_embeddings=query_embedding,
-                n_results=k * 2,
-                include=["documents", "metadatas", "distances"],
-                where=where_clause if where_clause else None
-            )
-        except Exception as e:
-            logger.warning(f"Filtered search failed, falling back to regular search: {e}")
-            # Fall back to regular search if filtering fails
-            results = collection.query(
-                query_embeddings=query_embedding,
-                n_results=k * 2,
-                include=["documents", "metadatas", "distances"]
-            )
 
         search_results = []
-        if results and results.get('documents') and results['documents']:
-            for i, doc in enumerate(results['documents'][0]):  # Access first sublist
-                relevance_score = 1 - results['distances'][0][i]  # Fixed: Access distances correctly
+        if results and results.get('documents') and results['documents'][0]:
+            for i, doc in enumerate(results['documents'][0]):
+                # The distance for normalized embeddings is often interpreted as 1 - cosine_similarity
+                relevance_score = 1 - results['distances'][0][i]
+
                 if relevance_score >= self.relevance_threshold:
                     search_results.append({
                         'text': doc,
-                        'metadata': results['metadatas'][0][i],  # ✅ Fixed: Access metadatas correctly
+                        'metadata': results['metadatas'][0][i],
                         'relevance_score': relevance_score
                     })
 
+        # Sort by relevance score and return the top_k results
        return sorted(search_results, key=lambda x: x['relevance_score'], reverse=True)[:k]
 
-
 def ensure_db_populated(db_instance: PolicyVectorDB, chunks_file_path: str) -> bool:
     """
     Checks if the DB is empty and populates it from a JSONL file if needed.
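Two details of the rewritten parsing are worth spelling out. ChromaDB's query returns one sublist per query embedding, so a single query is read through index [0], and the guard now checks results['documents'][0] because the outer list is truthy even when the inner one is empty. With normalized embeddings and a cosine-style distance, relevance is taken as 1 - distance. An illustrative result with invented values:

# Illustrative single-query result (values invented); note the nested lists.
results = {
    'documents': [['chunk a', 'chunk b']],
    'metadatas': [[{'section': '1'}, {'section': '2'}]],
    'distances': [[0.3, 0.6]],
}
for i, doc in enumerate(results['documents'][0]):
    score = 1 - results['distances'][0][i]  # 0.7, then 0.4
    print(doc, score >= 0.5)                # 'chunk a' True, 'chunk b' False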
@@ -190,11 +135,10 @@ def ensure_db_populated(db_instance: PolicyVectorDB, chunks_file_path: str) -> b
             return True
 
         logger.info("Vector database is empty. Attempting to populate from chunks file.")
-
         if not os.path.exists(chunks_file_path):
             logger.error(f"Chunks file not found at '{chunks_file_path}'. Cannot populate DB.")
             return False
+
         chunks_to_add = []
         with open(chunks_file_path, 'r', encoding='utf-8') as f:
             for line in f:
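add_chunks reads id, text, and metadata from each record, so each line of the chunks file is presumably a JSON object of that shape. A made-up example line:

{"id": "dop-001", "text": "Section 1: ...", "metadata": {"section": "1", "chunk_type": "clause"}}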
@@ -202,7 +146,7 @@ def ensure_db_populated(db_instance: PolicyVectorDB, chunks_file_path: str) -> b
                     chunks_to_add.append(json.loads(line))
                 except json.JSONDecodeError:
                     logger.warning(f"Skipping malformed line in chunks file: {line.strip()}")
-
+
         if not chunks_to_add:
             logger.warning(f"Chunks file at '{chunks_file_path}' is empty or invalid. No data to add.")
             return False
@@ -210,7 +154,6 @@ def ensure_db_populated(db_instance: PolicyVectorDB, chunks_file_path: str) -> b
         db_instance.add_chunks(chunks_to_add)
         logger.info("Vector database population attempt complete.")
         return True
-
     except Exception as e:
         logger.error(f"An error occurred during DB population check: {e}", exc_info=True)
-        return False
+        return False
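Taken together, a minimal usage sketch of the updated module; the import path follows the file location, but the data paths and the query string are invented:

from app.policy_vector_db import PolicyVectorDB, ensure_db_populated

# Assumed paths; adjust to the deployment layout.
db = PolicyVectorDB(persist_directory="./chroma_db", top_k_default=5, relevance_threshold=0.5)
if ensure_db_populated(db, chunks_file_path="./data/chunks.jsonl"):
    for hit in db.search("Who can approve capital expenditure?", top_k=3):
        print(round(hit['relevance_score'], 3), hit['metadata'], hit['text'][:80])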
 