Ramon Meffert
committed on
Commit
·
a1746cf
1
Parent(s):
6aa9059
Small fixes to retrievers
Browse files
src/retrievers/es_retriever.py
CHANGED
|
@@ -5,8 +5,6 @@ from elasticsearch import Elasticsearch
|
|
| 5 |
from dotenv import load_dotenv
|
| 6 |
import os
|
| 7 |
|
| 8 |
-
load_dotenv()
|
| 9 |
-
|
| 10 |
logger = get_logger()
|
| 11 |
|
| 12 |
|
|
@@ -19,11 +17,14 @@ class ESRetriever(Retriever):
|
|
| 19 |
es_username = os.getenv("ELASTIC_USERNAME")
|
| 20 |
|
| 21 |
self.client = Elasticsearch(
|
| 22 |
-
hosts=[es_host],
|
|
|
|
|
|
|
| 23 |
|
| 24 |
if self.client.indices.exists(index="paragraphs"):
|
| 25 |
self.dataset.load_elasticsearch_index(
|
| 26 |
-
"paragraphs", es_index_name="paragraphs",
|
|
|
|
| 27 |
else:
|
| 28 |
logger.info(f"Creating index 'paragraphs' on {es_host}")
|
| 29 |
self.dataset.add_elasticsearch_index(column="text",
|
|
|
|
| 5 |
from dotenv import load_dotenv
|
| 6 |
import os
|
| 7 |
|
|
|
|
|
|
|
| 8 |
logger = get_logger()
|
| 9 |
|
| 10 |
|
|
|
|
| 17 |
es_username = os.getenv("ELASTIC_USERNAME")
|
| 18 |
|
| 19 |
self.client = Elasticsearch(
|
| 20 |
+
hosts=[es_host],
|
| 21 |
+
http_auth=(es_username, es_password),
|
| 22 |
+
ca_certs="./http_ca.crt")
|
| 23 |
|
| 24 |
if self.client.indices.exists(index="paragraphs"):
|
| 25 |
self.dataset.load_elasticsearch_index(
|
| 26 |
+
"paragraphs", es_index_name="paragraphs",
|
| 27 |
+
es_client=self.client)
|
| 28 |
else:
|
| 29 |
logger.info(f"Creating index 'paragraphs' on {es_host}")
|
| 30 |
self.dataset.add_elasticsearch_index(column="text",
|
src/retrievers/faiss_retriever.py
CHANGED
|
@@ -81,7 +81,7 @@ class FaissRetriever(Retriever):
|
|
| 81 |
|
| 82 |
return index
|
| 83 |
|
| 84 |
-
def retrieve(self, query: str, k: int =
|
| 85 |
def embed(q):
|
| 86 |
# Inline helper function to perform embedding
|
| 87 |
tok = self.q_tokenizer(q, return_tensors="pt", truncation=True)
|
|
|
|
| 81 |
|
| 82 |
return index
|
| 83 |
|
| 84 |
+
def retrieve(self, query: str, k: int = 50):
|
| 85 |
def embed(q):
|
| 86 |
# Inline helper function to perform embedding
|
| 87 |
tok = self.q_tokenizer(q, return_tensors="pt", truncation=True)
|