alx-d committed
Commit ad752e9 · verified · 1 Parent(s): aa7abe9

Update philosophy.py

Files changed (1): philosophy.py +18 -8
philosophy.py CHANGED
@@ -30,17 +30,27 @@ RATE_LIMIT = 3
 def create_service_context():
 
     # prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
-    max_input_size = 4096
-    num_outputs = 512
-    max_chunk_overlap = 20
-    chunk_size_limit = 600
-    prompt_helper = PromptHelper(max_input_size, num_outputs, chunk_overlap_ratio= 0.1, chunk_size_limit=chunk_size_limit)
-    # llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.7, model_name="gpt-4", max_tokens=num_outputs))
-
+    # max_input_size = 4096
+    # num_outputs = 512
+    # max_chunk_overlap = 20
+    # chunk_size_limit = 600
+    # prompt_helper = PromptHelper(max_input_size, num_outputs, chunk_overlap_ratio= 0.1, chunk_size_limit=chunk_size_limit)
 
     #LLMPredictor is a wrapper class around LangChain's LLMChain that allows easy integration into LlamaIndex
-    llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
+    # llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
+
+    # Constraint parameters
+    max_input_size = 32768
+    num_outputs = 30000 # Safe upper limit to ensure total tokens do not exceed 32768
+    max_chunk_overlap = 20
+    chunk_size_limit = 600
+
+    # Allows the user to explicitly set certain constraint parameters
+    prompt_helper = PromptHelper(max_input_size, num_outputs, chunk_overlap_ratio=0.1, chunk_size_limit=chunk_size_limit)
 
+    # LLMPredictor is a wrapper class around LangChain's LLMChain that allows easy integration into LlamaIndex
+    llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name="gpt-4-32k", max_tokens=num_outputs))
+
     #constructs service_context
     service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
     return service_context
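
A note on the new numbers: PromptHelper budgets the response inside the model's context window, so with max_input_size = 32768 and num_outputs = 30000, roughly 32768 - 30000 = 2768 tokens remain for the prompt template and retrieved chunks, which is a tight budget for retrieval-heavy queries.

For context, here is a minimal usage sketch of how the updated create_service_context() might be consumed, assuming the legacy (pre-0.10) llama_index API this file is written against; the docs/ directory and the query string are illustrative assumptions, not part of this commit:

# Minimal sketch, assuming the legacy (pre-0.10) llama_index API and an
# OPENAI_API_KEY in the environment; "docs/" and the query are hypothetical.
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader

# Build the service context configured in this commit (gpt-4-32k predictor)
service_context = create_service_context()

# Load source texts and index them with the configured service context
documents = SimpleDirectoryReader("docs/").load_data()
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)

# Query the index; prompt_helper packs chunks within the 32768-token window
query_engine = index.as_query_engine()
response = query_engine.query("What is the central argument of this text?")
print(response)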