Update pipeline.py
pipeline.py CHANGED: +8 -4
@@ -55,10 +55,15 @@ def moderate_text(query: str) -> str:
     Classifies the query as harmful or not using Mistral Moderation via Pydantic AI.
     Returns "OutOfScope" if harmful, otherwise returns the original query.
     """
-    # Use the
-    response = mistral_agent.
+    # Use the moderation API to evaluate if the query is harmful
+    response = mistral_agent.model.classifiers.moderate_chat(
+        model="mistral-moderation-latest",
+        inputs=[
+            {"role": "user", "content": query},
+        ],
+    )
 
-    # Assuming response contains
+    # Assuming the response contains 'results' with category scores
     categories = response['results'][0]['categories']
 
     # Check if harmful content is flagged in moderation categories
@@ -68,7 +73,6 @@ def moderate_text(query: str) -> str:
         categories.get("selfharm", False):
         return "OutOfScope"
     return query
-
 # 3) build_or_load_vectorstore (no changes)
 def build_or_load_vectorstore(csv_path: str, store_dir: str) -> FAISS:
     if os.path.exists(store_dir):