text
stringlengths
1
93.6k
break
mix_type_num = each_type_num // 3
selected_quest = selected_quest_compose[:mix_type_num] + selected_quest_compare[:mix_type_num]
logger.info("selected_quest_compose: {}".format(selected_quest_compose))
logger.info("selected_quest_compare: {}".format(selected_quest_compare))
logger.info("selected_quest: {}".format(selected_quest))
return selected_quest_compose, selected_quest_compare, selected_quest
def sub_mid_to_fn(question, string, question_to_mid_dict):
    """Replace every Freebase MID token in *string* with its friendly name.

    Whitespace-separated tokens starting with ``m.`` or ``g.`` (possibly
    wrapped in parentheses, e.g. ``(JOIN ... m.0abc)``) are treated as MIDs
    and replaced using the mapping ``question_to_mid_dict[question]``.

    Parameters
    ----------
    question : str
        Key selecting the MID -> friendly-name mapping for this question.
    string : str
        S-expression text whose tokens may contain MIDs.
    question_to_mid_dict : dict
        Maps question -> {mid: friendly_name}.

    Returns
    -------
    str
        *string* with every MID occurrence substituted; returned unchanged
        when no MID is found.

    Raises
    ------
    KeyError
        If a found MID is missing from ``question_to_mid_dict[question]``.
    """
    # Collect (start_index, mid) for EVERY occurrence, in left-to-right order.
    # A moving search offset fixes two defects of the old implementation,
    # which called string.index(mid) with no offset and keyed a dict by mid:
    # a duplicated MID was only ever substituted at its first occurrence,
    # and repeated tokens could resolve to the wrong (earlier) position.
    occurrences = []
    search_from = 0
    for seg in string.split():
        if seg.startswith("m.") or seg.startswith("g."):
            mid = seg.strip(')(')
            start_index = string.find(mid, search_from)
            if start_index == -1:
                continue  # defensive: token not re-locatable, skip it
            occurrences.append((start_index, mid))
            search_from = start_index + len(mid)
    if not occurrences:
        return string
    # Rebuild the string once via join (the old += loop was quadratic).
    mapping = question_to_mid_dict[question]
    pieces = []
    prev_end = 0
    for b_idx, mid in occurrences:
        pieces.append(string[prev_end:b_idx])
        pieces.append(mapping[mid])
        prev_end = b_idx + len(mid)
    pieces.append(string[prev_end:])
    return ''.join(pieces)
def type_generator(question, prompt_type, api_key, LLM_engine):
    """Ask the LLM to classify *question* and return the predicted type text.

    Builds a prompt from *prompt_type* plus the question, then retries the
    OpenAI completion call until it succeeds (transient errors / rate limits
    trigger a short sleep and another attempt).

    Parameters
    ----------
    question : str
        Natural-language question to classify.
    prompt_type : str
        Prompt prefix with the classification instructions/examples.
    api_key : str
        OpenAI API key.
    LLM_engine : str
        Completion engine identifier.

    Returns
    -------
    str
        The stripped completion text (the predicted question type).
    """
    sleep(1)  # crude rate limiting between successive API calls
    # NOTE(review): there is no separator between the question text and
    # "Type of the question: " — preserved byte-for-byte, since changing
    # the prompt would change model behavior.
    prompt = prompt_type + " Question: " + question + "Type of the question: "
    answer_modi = None
    got_result = False
    while not got_result:
        try:
            openai.api_key = api_key
            answer_modi = openai.Completion.create(
                engine=LLM_engine,
                prompt=prompt,
                temperature=0,        # deterministic classification
                max_tokens=256,
                top_p=1,
                frequency_penalty=0,
                presence_penalty=0,
                stop=["Question: "]   # stop before the model starts a new example
            )
            got_result = True
        except Exception as e:
            # Narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit inside an infinite retry loop;
            # log the failure, back off, and retry.
            logger.info("OpenAI completion failed, retrying: {}".format(e))
            sleep(3)
    gene_exp = answer_modi["choices"][0]["text"].strip()
    return gene_exp
def ep_generator(question, selected_examples, temp, que_to_s_dict_train, question_to_mid_dict, api_key, LLM_engine,
                 retrieval=False, corpus=None, nlp_model=None, bm25_train_full=None, retrieve_number=100):
    """Generate candidate logical forms for *question* via few-shot prompting.

    Optionally replaces *selected_examples* with the top BM25-retrieved
    training questions, builds a "Question / Logical Form" few-shot prompt
    (MIDs rewritten to friendly names via ``sub_mid_to_fn``), and samples
    ``n=7`` completions from the OpenAI API, retrying on failure.

    Parameters
    ----------
    question : str
        The target question.
    selected_examples : list[str]
        Few-shot example questions (ignored when *retrieval* is True).
    temp : float
        Sampling temperature for the completion call.
    que_to_s_dict_train : dict
        Maps training question -> its logical form (falsy values skipped).
    question_to_mid_dict : dict
        Maps question -> {mid: friendly_name}, passed to ``sub_mid_to_fn``.
    api_key : str
        OpenAI API key.
    LLM_engine : str
        Completion engine identifier.
    retrieval : bool, optional
        When True, retrieve examples with BM25 instead of using
        *selected_examples*.
    corpus, nlp_model, bm25_train_full : optional
        Retrieval corpus, spaCy-style lemmatizer, and BM25 index; only
        used when *retrieval* is True.
    retrieve_number : int, optional
        How many questions to retrieve (default 100).

    Returns
    -------
    list[str]
        The stripped text of each sampled completion.
    """
    if retrieval:
        # Lemmatize the query, then pull the top-N most similar training
        # questions to serve as few-shot examples.
        tokenized_query = nlp_model(question)
        tokenized_query = [token.lemma_ for token in tokenized_query]
        top_ques = bm25_train_full.get_top_n(tokenized_query, corpus, n=retrieve_number)
        doc_scores = bm25_train_full.get_scores(tokenized_query)
        top_score = max(doc_scores)
        logger.info("top_score: {}".format(top_score))
        logger.info("top related questions: {}".format(top_ques))
        selected_examples = top_ques
    # Assemble the prompt in one join (the old += loop was quadratic);
    # every literal is byte-identical to the original prompt.
    parts = []
    for que in selected_examples:
        if not que_to_s_dict_train[que]:
            continue  # no gold logical form recorded for this question
        parts.append("Question: " + que + "\n" + "Logical Form: "
                     + sub_mid_to_fn(que, que_to_s_dict_train[que], question_to_mid_dict) + "\n")
    parts.append("Question: " + question + "\n" + "Logical Form: ")
    prompt = "".join(parts)
    answer_modi = None
    got_result = False
    while not got_result:
        try:
            openai.api_key = api_key
            answer_modi = openai.Completion.create(
                engine=LLM_engine,
                prompt=prompt,
                temperature=temp,
                max_tokens=256,
                top_p=1,
                frequency_penalty=0,
                presence_penalty=0,
                stop=["Question: "],  # stop before the model starts a new example
                n=7                   # sample 7 candidate logical forms
            )
            got_result = True
        except Exception as e:
            # Narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit inside an infinite retry loop;
            # log the failure, back off, and retry.
            logger.info("OpenAI completion failed, retrying: {}".format(e))
            sleep(3)
    gene_exp = [exp["text"].strip() for exp in answer_modi["choices"]]
    return gene_exp
def convert_to_frame(s_exp):
phrase_set = ["(JOIN", "(ARGMIN", "(ARGMAX", "(R", "(le", "(lt", "(ge", "(gt", "(COUNT", "(AND", "(TC", "(CONS"]
seg_list = s_exp.split()
after_filter_list = []
for seg in seg_list:
for phrase in phrase_set:
if phrase in seg: