Update validation.py
validation.py (+4 -4)
```diff
@@ -29,14 +29,14 @@ RATE_LIMIT = 3
 @limits(calls=RATE_LIMIT, period=1)
 def create_service_context():
 
-
+    # prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
     max_input_size = 4096
     num_outputs = 512
     max_chunk_overlap = 20
-    chunk_size_limit = 600
+    chunk_size_limit = 600
+    prompt_helper = PromptHelper(max_input_size, num_outputs, chunk_overlap_ratio=0.1, chunk_size_limit=chunk_size_limit)
+    # llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.7, model_name="gpt-4", max_tokens=num_outputs))
 
-    #allows the user to explicitly set certain constraint parameters
-    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
 
     #LLMPredictor is a wrapper class around LangChain's LLMChain that allows easy integration into LlamaIndex
     llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
```
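The substance of the commit is an API migration: newer LlamaIndex releases replaced `PromptHelper`'s integer `max_chunk_overlap` argument with a float `chunk_overlap_ratio` (the overlap expressed as a fraction of the chunk size), so the old call is kept as a comment and a new call with `chunk_overlap_ratio=0.1` takes its place. Below is a minimal sketch of what the whole function plausibly looks like after this change. The imports, the `RATE_LIMIT = 3` constant (taken from the hunk header), and the `ServiceContext.from_defaults` return value are not shown in the diff, so treat them as assumptions about the surrounding file.

```python
# Sketch only: imports and the return statement are assumptions, since the
# diff shows just the body of create_service_context().
from langchain.chat_models import ChatOpenAI
from llama_index import LLMPredictor, PromptHelper, ServiceContext
from ratelimit import limits

RATE_LIMIT = 3  # calls per period; period=1 second in the decorator below


@limits(calls=RATE_LIMIT, period=1)
def create_service_context():
    max_input_size = 4096    # context window of gpt-3.5-turbo
    num_outputs = 512        # tokens reserved for the model's answer
    max_chunk_overlap = 20   # left over from the old API; no longer passed
    chunk_size_limit = 600

    # New signature: chunk_overlap_ratio is a float fraction of the chunk
    # size, replacing the old integer max_chunk_overlap argument.
    prompt_helper = PromptHelper(
        max_input_size,
        num_outputs,
        chunk_overlap_ratio=0.1,
        chunk_size_limit=chunk_size_limit,
    )

    # LLMPredictor wraps a LangChain LLM for use inside LlamaIndex.
    llm_predictor = LLMPredictor(
        llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo",
                       max_tokens=num_outputs)
    )

    return ServiceContext.from_defaults(
        llm_predictor=llm_predictor, prompt_helper=prompt_helper
    )
```

Note the commit also leaves a commented-out `LLMPredictor` configured for `gpt-4` at `temperature=0.7` inside the function, presumably as an alternative configuration, while the active predictor remains `gpt-3.5-turbo` at `temperature=0.5`.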