        after_filter_list.append(phrase)
        if ")" in seg:
            after_filter_list.append(''.join(i for i in seg if i == ')'))
    return ''.join(after_filter_list)

def find_friend_name(gene_exp, org_question):
    # Pull entity surface names out of a generated s-expression: skip operator tokens,
    # keep tokens that literally appear in the question string, and merge consecutive
    # matches into multi-word names.
    seg_list = gene_exp.split()
    phrase_set = ["(JOIN", "(ARGMIN", "(ARGMAX", "(R", "(le", "(lt", "(ge", "(gt", "(COUNT", "(AND"]
    temp = []
    reg_ents = []
    for i, seg in enumerate(seg_list):
        if not any([ph in seg for ph in phrase_set]):
            if seg.lower() in org_question:
                temp.append(seg.lower())
            if seg.endswith(')'):
                stripped = seg.strip(')')
                stripped_add = stripped + ')'
                if stripped_add.lower() in org_question:
                    temp.append(stripped_add.lower())
                    reg_ents.append(" ".join(temp).lower())
                    temp = []
                elif stripped.lower() in org_question:
                    temp.append(stripped.lower())
                    reg_ents.append(" ".join(temp).lower())
                    temp = []
    if len(temp) != 0:
        reg_ents.append(" ".join(temp))
    return reg_ents

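# Illustrative example (hypothetical s-expression and question, not from the source):
# operator tokens such as "(AND"/"(JOIN" are skipped, trailing parentheses are stripped
# when needed, and adjacent question words are merged into one surface name:
#   find_friend_name(
#       "(AND (JOIN music.album.artist Taylor Swift) (JOIN music.album.genre Pop))",
#       "which pop album was released by taylor swift")
#   -> ["taylor swift", "pop"]
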
def get_right_mid_set(fn, id_dict, question):
    # When a friendly name maps to many candidate mids, group the mids by their
    # KB types (ignoring common.* / base.* types), rank the type names against the
    # question with BM25, and keep only the mids under the top-scoring types.
    type_to_mid_dict = {}
    type_list = []
    for mid in id_dict:
        types = get_types(mid)
        for cur_type in types:
            if not cur_type.startswith("common.") and not cur_type.startswith("base."):
                if cur_type not in type_to_mid_dict:
                    type_to_mid_dict[cur_type] = {}
                    type_to_mid_dict[cur_type][mid] = id_dict[mid]
                else:
                    type_to_mid_dict[cur_type][mid] = id_dict[mid]
                type_list.append(cur_type)
    tokenized_type_list = [re.split(r'\.|_', doc) for doc in type_list]
    # tokenized_question = tokenizer.tokenize(question)
    tokenized_question = question.split()
    bm25 = BM25Okapi(tokenized_type_list)
    top10_types = bm25.get_top_n(tokenized_question, type_list, n=10)
    selected_types = top10_types[:3]
    selected_mids = []
    for any_type in selected_types:
        # logger.info("any_type: {}".format(any_type))
        # logger.info("type_to_mid_dict[any_type]: {}".format(type_to_mid_dict[any_type]))
        selected_mids += list(type_to_mid_dict[any_type].keys())
    return selected_mids

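# Rough sketch of the narrowing step above (BM25Okapi comes from the rank_bm25 package;
# mids and types here are hypothetical): each mid is grouped under its KB types, type
# names are tokenized, e.g. re.split(r'\.|_', "music.artist") -> ['music', 'artist'],
# and only the mids whose types land in the top-3 BM25 matches for the question survive.
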
def from_fn_to_id_set(fn_list, question, name_to_id_dict, bm25_all_fns, all_fns):
    # Map each detected friendly name to a list of candidate mids. Names missing from
    # name_to_id_dict are first replaced by their closest BM25 match over all names.
    return_mid_list = []
    for fn_org in fn_list:
        drop_dot = fn_org.split()
        drop_dot = [seg.strip('.') for seg in drop_dot]
        drop_dot = " ".join(drop_dot)
        if fn_org.lower() not in question and drop_dot.lower() in question:
            fn_org = drop_dot
        if fn_org.lower() not in name_to_id_dict:
            logger.info("fn_org: {}".format(fn_org.lower()))
            tokenized_query = fn_org.lower().split()
            fn = bm25_all_fns.get_top_n(tokenized_query, all_fns, n=1)[0]
            logger.info("sub fn: {}".format(fn))
        else:
            fn = fn_org
        if fn.lower() in name_to_id_dict:
            id_dict = name_to_id_dict[fn.lower()]
            if len(id_dict) > 15:
                mids = get_right_mid_set(fn.lower(), id_dict, question)
            else:
                mids = sorted(id_dict.items(), key=lambda x: x[1], reverse=True)
                mids = [mid[0] for mid in mids]
            return_mid_list.append(mids)
    return return_mid_list

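# Illustrative call (hypothetical lookup tables and mids): with
#   name_to_id_dict = {"taylor swift": {"m.0dl567": 100, "m.0aaa11": 3}}
# and bm25_all_fns / all_fns built over all friendly names,
#   from_fn_to_id_set(["Taylor Swift"], "who is taylor swift",
#                     name_to_id_dict, bm25_all_fns, all_fns)
#   -> [["m.0dl567", "m.0aaa11"]]   # small id_dict, so mids are sorted by score
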
def convz_fn_to_mids(gene_exp, found_names, found_mids):
    # Splice each found friendly name out of the expression and substitute its mid.
    if len(found_names) == 0:
        return gene_exp
    start_index = 0
    new_string = ''
    for name, mid in zip(found_names, found_mids):
        b_idx = gene_exp.lower().index(name)
        e_idx = b_idx + len(name)
        new_string = new_string + gene_exp[start_index:b_idx] + mid
        start_index = e_idx
    new_string = new_string + gene_exp[start_index:]
    return new_string

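# Illustrative example (hypothetical mid): the lower-cased names returned by
# find_friend_name are replaced in place by their corresponding mids:
#   convz_fn_to_mids("(JOIN music.album.artist taylor swift)",
#                    ["taylor swift"], ["m.0dl567"])
#   -> "(JOIN music.album.artist m.0dl567)"
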
def add_reverse(org_exp):
    final_candi = [org_exp]
    total_join = 0
    list_seg = org_exp.split(" ")