|
|
from vertexai import rag |
|
|
from vertexai.generative_models import GenerativeModel, Tool |
|
|
import vertexai |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Initialize the Vertex AI SDK; must run before any rag.* or model calls.
# TODO(developer): Replace with your Google Cloud project ID.
PROJECT_ID = "your-project-id"
vertexai.init(project=PROJECT_ID, location="us-east4")
|
|
|
|
|
|
|
|
|
|
|
# Choose the embedding model the corpus will use to vectorize documents.
prediction_endpoint = rag.VertexPredictionEndpoint(
    publisher_model="publishers/google/models/text-embedding-005"
)
embedding_model_config = rag.RagEmbeddingModelConfig(
    vertex_prediction_endpoint=prediction_endpoint
)
|
|
|
|
|
# Create the RAG corpus, backed by a vector DB that uses the embedding
# model configured above.
# TODO(developer): Replace with a human-readable corpus display name.
display_name = "my-rag-corpus"
rag_corpus = rag.create_corpus(
    display_name=display_name,
    backend_config=rag.RagVectorDbConfig(
        rag_embedding_model_config=embedding_model_config
    ),
)
|
|
|
|
|
|
|
|
# TODO(developer): Replace with the Cloud Storage (gs://...) or Google Drive
# URIs of the files to ingest into the corpus.
paths = ["gs://your-bucket/your-file.txt"]

# Import the files, splitting each document into 512-token chunks with a
# 100-token overlap between neighboring chunks.
rag.import_files(
    rag_corpus.name,
    paths,
    transformation_config=rag.TransformationConfig(
        chunking_config=rag.ChunkingConfig(
            chunk_size=512,
            chunk_overlap=100,
        ),
    ),
    # Throttle embedding requests to stay within project quota.
    max_embedding_requests_per_min=1000,
)
|
|
|
|
|
|
|
|
# Retrieval settings shared by the direct query below and the Gemini tool:
# return at most 3 chunks, dropping matches whose vector distance exceeds 0.5.
distance_filter = rag.Filter(vector_distance_threshold=0.5)
rag_retrieval_config = rag.RagRetrievalConfig(top_k=3, filter=distance_filter)
|
|
# Run a direct retrieval query against the corpus and print the raw contexts.
corpus_resource = rag.RagResource(rag_corpus=rag_corpus.name)
response = rag.retrieval_query(
    rag_resources=[corpus_resource],
    text="What is RAG and why it is helpful?",
    rag_retrieval_config=rag_retrieval_config,
)
print(response)
|
|
|
|
|
|
|
|
|
|
|
# Expose corpus retrieval as a Tool so a Gemini model can ground its
# answers on the imported documents.
rag_store = rag.VertexRagStore(
    rag_resources=[rag.RagResource(rag_corpus=rag_corpus.name)],
    rag_retrieval_config=rag_retrieval_config,
)
rag_retrieval_tool = Tool.from_retrieval(
    retrieval=rag.Retrieval(source=rag_store)
)
|
|
|
|
|
|
|
|
# Ask Gemini the same question, this time grounded via the retrieval tool.
rag_model = GenerativeModel(
    model_name="gemini-2.0-flash-001",
    tools=[rag_retrieval_tool],
)
response = rag_model.generate_content("What is RAG and why it is helpful?")
print(response.text)
|
|
|
|
|
|
|
|
|
|
|
|