{"seq_id": "613191457", "text": "import requests\nfrom django.conf import settings\n\n\nclass Bitrix24:\n\n def __init__(self, auth_code):\n auth_url = f'https://time-ismoney.bitrix24.ua/oauth/token/?grant_type=authorization_code&client_id={settings.APP_ID}&client_secret={settings.APP_SECRET}&code={auth_code}&scope=application_permissions&redirect_uri=application_URL'\n response = requests.post(auth_url).json()\n self.auth_token = response[\"access_token\"]\n self.refresh_token = response[\"refresh_token\"]\n return None\n\n\n def refresh_token(self, refresh_token):\n ref_url = f'https://time-ismoney.bitrix24.ua/oauth/token/?grant_type=refresh_token&client_id={APP_ID}&client_secret={APP_SECRET}&refresh_token={refresh_token}&scope=granted_permission&redirect_uri=app_URL'\n response = requests.get(ref_url).json()\n self.refresh_token = response[\"refresh_token\"]\n self.auth_token = response[\"access_token\"]\n return self.auth_token\n\n def get_user_info(self):\n url = f'https://time-ismoney.bitrix24.ua/rest/user.current?auth={self.auth_token}'\n print(url)\n res = requests.post(url).json()\n return res\n", "sub_path": "bitrix_data/auth/bitrix24.py", "file_name": "bitrix24.py", "file_ext": "py", "file_size_in_byte": 1161, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.conf.settings.APP_ID", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.settings.APP_SECRET", "line_number": 8, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 25, "usage_type": "call"}]} {"seq_id": "473459793", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 5 10:26:45 2018\n\n@author: abdul.ramadan\n\"\"\"\n\n# Importing the libraries\n#import numpy as np\nimport preprocessData as ppd\n\n# Importing the dataset\ndataset = ppd.generateMainDataSet()\nX = dataset.iloc[:, :1].values\ny = dataset.iloc[:, -1].values\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)\n\n# Fitting Simple Linear Regression to the Training set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = regressor.predict(X_test)\n\n#import visualizeData as vd\nvd.plotData(X_train, X_test, y_train, y_test, regressor.predict(X_train));\nprint(y_pred[-1])", "sub_path": "sentimentapi/predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 881, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "preprocessData.generateMainDataSet", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 24, "usage_type": "call"}]} {"seq_id": "279521774", "text": "from time import sleep\n\nfrom appium import webdriver\n\n\nclass TestAppiumWeb:\n def setup(self):\n\n desired_caps = {'platformName': 'Android',\n 'platformVersion': '6.0',\n 'deviceName': '127.0.0.1:7555',\n 'noReset': 
'true',\n 'dontStopAppOnReset': 'true',\n 'unicodeKeyboard': 'true',\n 'resetKeyboard': 'true',\n 'browserName': 'Browser',\n 'chromedriverExecutable': 'E:/Files/chromedriver/52.0/chromedriver.exe'\n }\n\n self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)\n self.driver.implicitly_wait(5)\n\n def teardown(self):\n pass\n\n def test_appium_web(self):\n self.driver.get(\"https://m.baidu.com\")\n sleep(3)", "sub_path": "appium_demo/test_appium_web.py", "file_name": "test_appium_web.py", "file_ext": "py", "file_size_in_byte": 878, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "appium.webdriver.Remote", "line_number": 20, "usage_type": "call"}, {"api_name": "appium.webdriver", "line_number": 20, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}]} {"seq_id": "26030003", "text": "from django.db import models\r\nfrom django.forms.models import modelform_factory\r\nfrom apps.magency.models import Advertisement, NewspaperAd, TelevisionAd, BillboardAd\r\nfrom apps.magency.models.advertisement import WebsiteAd\r\nfrom core.views.base import SecuredView\r\n\r\n__author__ = 'sha256'\r\n\r\n\r\nclass AdvertisementListView(SecuredView):\r\n\r\n def get(self, request, what):\r\n data = {}\r\n data['what'] = what\r\n return self.render(request, \"magency/ads_list.html\", data)\r\n\r\n\r\nclass AdvertisementAddView(SecuredView):\r\n\r\n def formfield_call_back(self, field, **kwargs):\r\n if isinstance(field, models.ManyToManyField):\r\n fies = field.formfield(**kwargs)\r\n if hasattr(field.related.parent_model, 'parent'):\r\n fies.label_from_instance = lambda ob: ob.parent.title + \" - \" + ob.title\r\n else:\r\n fies.label_from_instance = lambda ob: ob.title\r\n return fies\r\n elif isinstance(field, models.BooleanField):\r\n fies = field.formfield(**kwargs)\r\n fies.widget.attrs['class'] = \"ace-switch ace-switch-5\"\r\n return fies\r\n else:\r\n return field.formfield(**kwargs)\r\n\r\n def get_common_data(self, request):\r\n data = {}\r\n form2_klass = None\r\n if self.what == \"newspaper\":\r\n form2_klass = NewspaperAd\r\n elif self.what == \"television\":\r\n form2_klass = TelevisionAd\r\n elif self.what == \"billboard\":\r\n form2_klass = BillboardAd\r\n elif self.what == \"internet\":\r\n form2_klass = WebsiteAd\r\n\r\n Form1 = modelform_factory(Advertisement, exclude=('company', 'bill'))\r\n Form2 = modelform_factory(form2_klass, exclude=('advertisement',), formfield_callback=self.formfield_call_back)\r\n form1 = Form1(request.POST or None, request.FILES or None)\r\n form2 = Form2(request.POST or None)\r\n\r\n if form1.is_valid() and form2.is_valid():\r\n f1 = form1.save(commit=False)\r\n f1.company_id = request.user.id\r\n f1.save()\r\n\r\n f2 = form2.save(commit=False)\r\n f2.advertisement_id = f1.id\r\n f2.save()\r\n form2.save_m2m()\r\n\r\n form1 = Form1()\r\n form2 = Form2()\r\n\r\n data['form1'] = form1\r\n data['form2'] = form2\r\n\r\n return data\r\n\r\n def get(self, request, what):\r\n self.what = what\r\n return self.render(request, \"magency/ads_add.html\", {})\r\n\r\n def post(self, request, what):\r\n self.what = what\r\n return self.render(request, \"magency/ads_add.html\", {})", "sub_path": "apps/magency/views/advertisement.py", "file_name": "advertisement.py", "file_ext": "py", "file_size_in_byte": 2631, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": 
"core.views.base.SecuredView", "line_number": 10, "usage_type": "name"}, {"api_name": "core.views.base.SecuredView", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "apps.magency.models.NewspaperAd", "line_number": 39, "usage_type": "name"}, {"api_name": "apps.magency.models.TelevisionAd", "line_number": 41, "usage_type": "name"}, {"api_name": "apps.magency.models.BillboardAd", "line_number": 43, "usage_type": "name"}, {"api_name": "apps.magency.models.advertisement.WebsiteAd", "line_number": 45, "usage_type": "name"}, {"api_name": "django.forms.models.modelform_factory", "line_number": 47, "usage_type": "call"}, {"api_name": "apps.magency.models.Advertisement", "line_number": 47, "usage_type": "argument"}, {"api_name": "django.forms.models.modelform_factory", "line_number": 48, "usage_type": "call"}]} {"seq_id": "355605313", "text": "from functools import reduce\nfrom typing import Iterable, TypeVar\n\n\nT = TypeVar(\"T\")\n\n\ndef multiline_concat(\n s1: str, s2: str, merge_from_bottom: bool = True, sep: str = \" \"\n) -> str:\n \"\"\"\n Horizontally joins two multiline strings using seperator `sep`\n with proper spacing. Used to join two \"column\" of data\n \"\"\"\n\n s1_split = s1.split(\"\\n\")\n s2_split = s2.split(\"\\n\")\n\n # Pad number of lines in each string to ensure they're equal\n s1_split_length = len(s1_split)\n s2_split_length = len(s2_split)\n s1_fixed_length = [\n \"\" for _ in range(max(s1_split_length, s2_split_length) - s1_split_length)\n ]\n s2_fixed_length = [\n \"\" for _ in range(max(s1_split_length, s2_split_length) - s2_split_length)\n ]\n\n # Determine merge order\n if merge_from_bottom:\n s1_fixed_length.extend(s1_split)\n s2_fixed_length.extend(s2_split)\n else:\n s1_split.extend(s1_fixed_length)\n s2_split.extend(s2_fixed_length)\n s1_fixed_length = s1_split\n s2_fixed_length = s2_split\n\n # Pad each individual string within each column to ensure they're equal\n s1_length_max = max(len(s) for s in s1_fixed_length)\n s2_length_max = max(len(s) for s in s2_fixed_length)\n s1_fixed_width = [f\"{s:<{s1_length_max}}\" for s in s1_fixed_length]\n s2_fixed_width = [f\"{s:<{s2_length_max}}\" for s in s2_fixed_length]\n\n # Concatenate the two multiline strings\n return \"\\n\".join(sep.join(elem) for elem in zip(s1_fixed_width, s2_fixed_width))\n\n\ndef multiline_concat_list(\n s: Iterable[str], merge_from_bottom: bool = True, sep: str = \" \"\n) -> str:\n \"\"\"\n Horizontally joins a list of multiline strings\n \"\"\"\n\n return reduce(lambda x, y: multiline_concat(x, y, merge_from_bottom, sep), s)\n\n\ndef to_string(list_: Iterable[T]) -> Iterable[str]:\n return map(str, list_)\n", "sub_path": "camelcalc/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1864, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "typing.TypeVar", "line_number": 5, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 50, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 56, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 59, "usage_type": "name"}]} {"seq_id": "321804651", "text": "import 
discord\nfrom discord.ext import commands\n\nimport settings\nfrom data import data\nfrom helpers import *\nfrom datetime import datetime\n\n\nclass OSHA(commands.Cog):\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\n\t@commands.command(name=\"osha-violation\",\n\t\t\t\t\t help=\"Report an OSHA violation ๐Ÿ‘ฎโ€โ™€๏ธ\", \n\t\t\t\t\t usage=\"<@person>\")\n\tasync def osha_violation(self, ctx, *args):\n\t\tperson = self.get_person(ctx, args)\n\t\tif not person:\n\t\t\tawait ctx.send('Who dat {}'.format(get_emoji(ctx.guild, 'jm_judge')))\n\t\t\treturn\n\n\t\tresp = '๐Ÿ’ฅ๐Ÿšจ๐Ÿ’ฅ๐Ÿšจ๐Ÿ’ฅ๐Ÿšจ๐Ÿ’ฅ๐Ÿšจ๐Ÿ’ฅ\\n๐Ÿ‘ฎโ€โ™€๏ธ HANDS UP, {} ๐Ÿ‘ฎโ€โ™€๏ธ\\n๐Ÿ’ฅ๐Ÿšจ๐Ÿ’ฅ๐Ÿšจ๐Ÿ’ฅ๐Ÿšจ๐Ÿ’ฅ๐Ÿšจ๐Ÿ’ฅ'.format(person.mention)\n\t\tdesc = 'There have been **{}** since the last incident.\\nPrevious record: **{}**'.format('0 days','0 days')\n\t\temb = discord.Embed(description=desc, color=settings.OSHA_COLOR)\n\t\tawait ctx.send(resp,embed=emb)\n\n\tdef get_person(self, ctx, args):\n\t\tperson = None\n\t\tif ctx.message.mentions:\n\t\t\tperson = ctx.message.mentions[0]\n\t\telse:\n\t\t\tname = args[0].lower()\n\t\t\tfor per in ctx.guild.members:\n\t\t\t\tif per.display_name.lower() == name:\n\t\t\t\t\tperson = per\n\t\t\t\t\tbreak\n\t\treturn person\n\n\ndef setup(bot):\n\tbot.add_cog(OSHA(bot))", "sub_path": "cogs/osha.py", "file_name": "osha.py", "file_ext": "py", "file_size_in_byte": 1187, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 10, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 10, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 25, "usage_type": "call"}, {"api_name": "settings.OSHA_COLOR", "line_number": 25, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 14, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 14, "usage_type": "name"}]} {"seq_id": "572639475", "text": "import numpy as np\nimport torch\nimport torch.nn as nn\n\n\n# ่กก้‡่‰ฒๅทฎ/้ขœ่‰ฒ็›ธไผผๆ€ง\n# https://zh.wikipedia.org/wiki/%E9%A2%9C%E8%89%B2%E5%B7%AE%E5%BC%82\ndef loss_cs(y_true, y_pred):\n # mse loss\n # mse_loss = nn.MSELoss(y_true, y_pred)\n # perceptual loss\n y_true *= 255 # [-1,1] => [0,255]\n y_pred *= 255 # [-1,1] => [0,255]\n rmean = (y_true[:, :, :, 0] + y_pred[:, :, :, 0]) / 2\n r = y_true[:, :, :, 0] - y_pred[:, :, :, 0]\n g = y_true[:, :, :, 1] - y_pred[:, :, :, 1]\n b = y_true[:, :, :, 2] - y_pred[:, :, :, 2]\n percep_loss = torch.mean(torch.sqrt((((512+rmean)*r*r)/256) + 4*g*g + (((767-rmean)*b*b)/256)))/255.0\n # gen_total_err = 0.8*mse_loss+0.2*percep_loss\n return percep_loss\n\n# ่กก้‡่‰ฒๅฝฉๆ’ๅธธๆ€ง\n# https://www.cnblogs.com/wangyong/p/9141075.html\nclass loss_cc(nn.Module):\n\n def __init__(self):\n super(loss_cc, self).__init__()\n\n def forward(self, x):\n\n b,c,h,w = x.shape\n\n mean_rgb = torch.mean(x,[2,3],keepdim=True)\n mr,mg, mb = torch.split(mean_rgb, 1, dim=1)\n Drg = torch.pow(mr-mg,2)\n Drb = torch.pow(mr-mb,2)\n Dgb = torch.pow(mb-mg,2)\n k = torch.pow(torch.pow(Drg,2) + torch.pow(Drb,2) + torch.pow(Dgb,2),0.5)\n return k\n\n\n#\nclass loss_t(nn.Module):\n def __init__(self):\n super(loss_t, self).__init__()\n\n def forward(self, x, y):\n L2_temp = nn.MSELoss()(x, y)\n L1_temp = nn.L1Loss()(x, y)\n\n L_total = 0.3 * L1_temp + 0.7 * L2_temp # + 0.3*Le1_temp + 0.3*Le2_temp\n return L_total", "sub_path": "venv/MyLoss.py", "file_name": "MyLoss.py", "file_ext": "py", 
"file_size_in_byte": 1546, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "torch.mean", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.split", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.MSELoss", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.L1Loss", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}]} {"seq_id": "229060433", "text": "import torch, os\nimport pandas as pd\nfrom skimage import io\nfrom Models import resnet\n\n\nclass ModelCreator:\n\tdef __init__(self, args):\n\t\tself.masterDirectory = args.MasterDirectory if args.MasterDirectory[-1] == '/' else args.MasterDirectory + '/'\n\t\tself.annotationFile = args.AnnotationFile\n\t\tself.meansFile = args.MeansFile\n\t\tprint(args)\n\n\t\t# Set manual seed\n\t\ttorch.manual_seed(args.ManualSeed)\n\n\t\t# Parse and memoraize mean and standard deviation information\n\t\tself._parseMeanFile() # Creates self.means and self.stds attributes\n\n\t\t# Parse and memorize data, label, and partition information for \n\t\tself._parseAnnotationFile() \n\n\t\t# Generate model\n\t\tself._generateModel(args.Model, args.ModelDepth)\n\n\tdef _parseMeanFile(self):\n\t\tself.means = {}\n\t\tself.stds = {}\n\t\twith open(self.masterDirectory + self.meansFile) as f:\n\t\t\tfor i,line in enumerate(f):\n\t\t\t\tif i==0:\n\t\t\t\t\tcontinue\n\t\t\t\ttokens = line.rstrip().split(',')\n\t\t\t\tself.means[tokens[0]] = [float(x) for x in tokens[1:4]]\n\t\t\t\tself.stds[tokens[0]] = [float(x) for x in tokens[4:7]]\n\t\tprint(self.means)\n\t\tprint(self.stds)\n\n\n\tdef _parseAnnotationFile(self):\n\t\tself.annotateData = pd.read_csv(self.masterDirectory + self.annotationFile, sep = ',', header = 0)\n\t\tself.nClasses = len(self.annotateData.groupby(['Label'])['Label'])\n\t\tfor i,row in self.annotateData.iterrows():\n\t\t\tlocation = row.Location if row.Location[-1] == '/' else row.Location + '/'\n\t\t\tframes = [x for x in os.listdir(self.masterDirectory + location) if '.jpg' in x]\n\t\t\ttry:\n\t\t\t\tassert self.nFrames == len(frames)\n\t\t\texcept AttributeError:\n\t\t\t\tself.nFrames = len(frames)\n\t\t\tif i == 0:\n\t\t\t\timg = io.imread(self.masterDirectory + location + frames[0])\n\t\t\t\tassert img.shape[0] == img.shape[1]\n\t\t\t\tassert img.shape[2] == 3\n\t\t\t\tself.frameSize = img.shape[0]\n\n\n\tdef _generateModel(self, modelType, modelDepth):\n\t\t\n\t\tassert modelType in ['resnet']\n\n\t\tif modelType == 'resnet':\n\t\t\tassert modelDepth in [10, 18, 34, 50, 101, 152, 200]\n\t\t\tfrom Models.resnet import get_fine_tuning_parameters\n\n\t\t\tif modelDepth == 18:\n\t\t\t\tmodel = 
resnet.resnet18(num_classes=self.nClasses,sample_size=self.frameSize, sample_duration=self.nFrames)\n\n\t\treturn model, model.parameters()", "sub_path": "Modules/ModelCreator.py", "file_name": "ModelCreator.py", "file_ext": "py", "file_size_in_byte": 2176, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "torch.manual_seed", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 41, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 45, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 51, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 51, "usage_type": "name"}, {"api_name": "Models.resnet.resnet18", "line_number": 66, "usage_type": "call"}, {"api_name": "Models.resnet", "line_number": 66, "usage_type": "name"}]} {"seq_id": "62571317", "text": "# -*- coding: utf-8 -*-\n\nimport watchlog\nfrom watchdog.observers import Observer\nimport time\nimport multiprocessing\n\n\ndef run_watch_log(log_dir, log_file):\n print('run watchlog...')\n observer = Observer()\n event_handler = watchlog.LogFileEventHandler(log_file)\n observer.schedule(event_handler, log_dir, True)\n observer.start()\n\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n print('end watchlog!!!')\n\n\ndef generate_log(log_path):\n print('run generate_log....')\n while True:\n time.sleep(1)\n with open(log_path, 'a') as fh:\n msg = 'just for testing !!!\\n'\n fh.writelines(msg)\n fh.flush()\n\n\nif __name__ == '__main__':\n log_file = './log/demo.log'\n log_dir = './log'\n p1 = multiprocessing.Process(target=run_watch_log, args=(log_dir, log_file))\n p2 = multiprocessing.Process(target=generate_log, args=(log_file, ))\n\n p1.start()\n p2.start()\n p1.join()\n p2.join()\n\n", "sub_path": "demo.py", "file_name": "demo.py", "file_ext": "py", "file_size_in_byte": 1038, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "watchdog.observers.Observer", "line_number": 11, "usage_type": "call"}, {"api_name": "watchlog.LogFileEventHandler", "line_number": 12, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 18, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 38, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 39, "usage_type": "call"}]} {"seq_id": "151928956", "text": "import dash_player\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output, State\n\napp = dash.Dash(__name__)\n\napp.scripts.config.serve_locally = True\n\n\napp.layout = html.Div([\n dash_player.DashPlayer(\n id='video-player',\n url='http://media.w3.org/2010/05/bunny/movie.mp4',\n controls=True\n ),\n\n html.Button('Set seekTo to 10', id='button-seek-to'),\n\n html.Div(id='div-current-time', style={'margin-bottom': '20px'}),\n\n html.Div(id='div-method-output')\n])\n\n\n@app.callback(Output('div-current-time', 'children'),\n [Input('video-player', 'currentTime')])\ndef update_time(currentTime):\n return 'Current Time: {}'.format(currentTime)\n\n\n@app.callback(Output('div-method-output', 'children'),\n [Input('video-player', 'secondsLoaded')],\n [State('video-player', 'duration')])\ndef update_methods(secondsLoaded, duration):\n return 'Second Loaded: {}, Duration: 
{}'.format(secondsLoaded, duration)\n\n\n@app.callback(Output('video-player', 'seekTo'),\n [Input('button-seek-to', 'n_clicks')])\ndef set_seekTo(n_clicks):\n return 10\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)", "sub_path": "usage-method.py", "file_name": "usage-method.py", "file_ext": "py", "file_size_in_byte": 1239, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "dash.Dash", "line_number": 7, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 12, "usage_type": "call"}, {"api_name": "dash_player.DashPlayer", "line_number": 13, "usage_type": "call"}, {"api_name": "dash_html_components.Button", "line_number": 19, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 21, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 23, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 27, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 28, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 33, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 34, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 35, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 40, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 41, "usage_type": "call"}]} {"seq_id": "50328286", "text": "# -*- coding: utf-8 -*-\n\nimport logging\nimport pprint\nimport werkzeug\n\nfrom odoo import http\nfrom odoo.http import request\n\n_logger = logging.getLogger(__name__)\n\nclass TwilioController(http.Controller):\n\n @http.route([\n '/twilio/webhook',\n ], type='http', auth='none', csrf=False)\n def twilio_webhook(self, **post):\n \"\"\" Webhook \"\"\"\n _logger.info('Twilio: mensaje %s', pprint.pformat(post))\n request.env['twilio.phone_alias'].sudo().message_process(post)\n return str(\"\"\"\n\n\n\"\"\")\n", "sub_path": "controllers/message.py", "file_name": "message.py", "file_ext": "py", "file_size_in_byte": 582, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "odoo.http.Controller", "line_number": 12, "usage_type": "attribute"}, {"api_name": "odoo.http", "line_number": 12, "usage_type": "name"}, {"api_name": "pprint.pformat", "line_number": 19, "usage_type": "call"}, {"api_name": "odoo.http.request.env", "line_number": 20, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 20, "usage_type": "name"}, {"api_name": "odoo.http.route", "line_number": 14, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 14, "usage_type": "name"}]} {"seq_id": "598865697", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\n\ndef read_image(path):\n image_bytes = tf.io.read_file(path)\n image = tf.image.decode_image(image_bytes)\n return tf.cast(image, dtype=tf.float32)\n\n\ndef imshow(image, figsize=(16, 9), title=None):\n plt.figure(figsize=figsize)\n plt.axis('off')\n plt.imshow(np.uint8(image))\n\n if title:\n plt.title(title)\n\n\ndef visualize_detections(image,\n boxes,\n classes,\n scores,\n figsize=(12, 12),\n linewidth=1,\n color=[0, 0, 1],\n title=None,\n score_threshold=0.25,\n show_labels=True,\n save=False,\n filename=None):\n \"\"\"Visualize 
Detections\"\"\"\n image = np.array(image, dtype=np.uint8)\n plt.figure(figsize=figsize)\n\n if title:\n plt.title(title)\n\n plt.axis(\"off\")\n plt.imshow(image)\n ax = plt.gca()\n for box, _cls, score in zip(boxes, classes, scores):\n\n if score < score_threshold:\n continue\n\n text = \"{}: {:.2f}\".format(_cls, score)\n x1, y1, x2, y2 = box\n w, h = x2 - x1, y2 - y1\n patch = plt.Rectangle([x1, y1],\n w,\n h,\n fill=False,\n edgecolor=color,\n linewidth=linewidth)\n ax.add_patch(patch)\n\n if show_labels:\n ax.text(\n x1,\n y1,\n text,\n bbox={\n \"facecolor\": color,\n \"alpha\": 0.4\n },\n clip_box=ax.clipbox,\n clip_on=True,\n )\n\n if save:\n plt.savefig(filename, bbox_inches='tight')\n plt.close()\n", "sub_path": "retinanet/image_utils.py", "file_name": "image_utils.py", "file_ext": "py", "file_size_in_byte": 1962, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "tensorflow.io.read_file", "line_number": 7, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 7, "usage_type": "attribute"}, {"api_name": "tensorflow.image.decode_image", "line_number": 8, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 9, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 9, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 34, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Rectangle", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}]} {"seq_id": 
"534520093", "text": "# Copyright edalize contributors\n# Licensed under the 2-Clause BSD License, see LICENSE for details.\n# SPDX-License-Identifier: BSD-2-Clause\n\nimport os.path\nfrom importlib import import_module\n\nfrom edalize.flows.edaflow import Edaflow\n\n\nclass Lint(Edaflow):\n \"\"\"Run a linter tool on the design\"\"\"\n\n argtypes = [\"vlogdefine\", \"vlogparam\"]\n\n FLOW_DEFINED_TOOL_OPTIONS = {\n \"verilator\": {\"mode\": \"lint-only\", \"exe\": \"false\", \"make_options\": []},\n # verible, spyglass, ascentlint, slang...\n }\n\n FLOW_OPTIONS = {\n \"frontends\": {\n \"type\": \"str\",\n \"desc\": \"Tools to run before linter (e.g. sv2v)\",\n \"list\": True,\n },\n \"tool\": {\n \"type\": \"str\",\n \"desc\": \"Select Lint tool\",\n },\n }\n\n @classmethod\n def get_tool_options(cls, flow_options):\n flow = flow_options.get(\"frontends\", [])\n tool = flow_options.get(\"tool\")\n if not tool:\n raise RuntimeError(\"Flow 'lint' requires flow option 'tool' to be set\")\n flow.append(tool)\n\n return cls.get_filtered_tool_options(flow, cls.FLOW_DEFINED_TOOL_OPTIONS)\n\n def configure_flow(self, flow_options):\n tool = self.flow_options.get(\"tool\", \"\")\n if not tool:\n raise RuntimeError(\"Flow 'lint' requires flow option 'tool' to be set\")\n flow = [(tool, [], self.FLOW_DEFINED_TOOL_OPTIONS.get(tool, {}))]\n # Add any user-specified frontends to the flow\n next_tool = tool\n\n for frontend in reversed(flow_options.get(\"frontends\", [])):\n flow[0:0] = [(frontend, [next_tool], {})]\n next_tool = frontend\n return flow\n\n def configure_tools(self, nodes):\n super().configure_tools(nodes)\n\n self.commands.default_target = nodes[\n self.flow_options.get(\"tool\")\n ].default_target\n", "sub_path": "edalize/flows/lint.py", "file_name": "lint.py", "file_ext": "py", "file_size_in_byte": 1877, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "edalize.flows.edaflow.Edaflow", "line_number": 11, "usage_type": "name"}]} {"seq_id": "206698057", "text": "from odoo import models, fields, api,_\r\nfrom odoo.exceptions import UserError\r\nimport logging\r\n_logger = logging.getLogger(__name__)\r\n\r\n\r\nclass vit_accounting_invoice(models.Model):\r\n\t_name = 'account.invoice'\r\n\t_inherit = 'account.invoice'\r\n\r\n\t# name_inv = fields.Char(string=\"Name\", readonly=True, default=\"New\")\r\n\tbast_id = fields.Many2one(comodel_name=\"vit.ppic_bast\", string=\"BAST No\")\r\n\tjo_id = fields.Many2one(string=\"Job Order No\", related=\"bast_id.jo_id\")\r\n\tproject = fields.Text(string=\"Project\", related=\"bast_id.project_dest\")\r\n\tpo_id = fields.Many2one(string=\"PO No\", related=\"jo_id.po_id\")\r\n\tpartner_id = fields.Many2one(string=\"Customer Name\", related=\"jo_id.partner_id\", store=True)\r\n\t# partner_id = fields.Many2one('res.partner', string='Partner', change_default=True, states={'draft': [('readonly', False)]}, track_visibility='always', ondelete='restrict', help=\"You can find a contact by its Name, TIN, Email or Internal Reference.\", )\r\n\tcustomer_code = fields.Char(string=\"Customer No\", related=\"partner_id.ref\")\r\n\tdelivery_address = fields.Text(string=\"Address\", store=True)\r\n\ttransfer_rek = fields.Text(string=\"Transfer to\")\r\n\t\r\n\r\n\t@api.onchange('partner_id')\r\n\tdef _get_partner_address(self):\r\n\t\tif not self.partner_id:\r\n\t\t\treturn\r\n\t\tself.delivery_address = \"{name} {city}, {state}, {country}\\nP.O Box : {zip}\\nTel. 
: {phone}\\nEmail : {email}\".format(\r\n\t\t\tname = self.partner_id.street,\r\n\t\t\tzip= self.partner_id.zip,\r\n\t\t\tcity= self.partner_id.city,\r\n\t\t\tstate= self.partner_id.state_id.name,\r\n\t\t\tphone= self.partner_id.phone,\r\n\t\t\tcountry= self.partner_id.country_id.name,\r\n\t\t\temail= self.partner_id.email\r\n\t\t\t)\r\n\r\n\t@api.onchange('bast_id')\r\n\tdef change_lines_invoice(self):\r\n\t\tif not self.bast_id:\r\n\t\t\treturn\r\n\t\tline_data = []\r\n\t\tfor res in self:\r\n\t\t\tres.update({\"invoice_line_ids\": False})\r\n\t\t\tfor que in res.jo_id.product_ids:\r\n\t\t\t\tres_que = que.product_id.categ_id.property_stock_account_input_categ_id.id\r\n\t\t\t\tproduct_id_marketing = self.env['product.product'].search([('product_tmpl_id','=',que.product_id.id)])\r\n\t\t\t\tif res_que:\t\r\n\t\t\t\t\tline_data = [(0,0,{\r\n\t\t\t\t\t\t\t\"product_id\": product_id_marketing.id,\r\n\t\t\t\t\t\t\t\"name\": que.product_id.name,\r\n\t\t\t\t\t\t\t\"quantity\": round(que.total_weight,1),\r\n\t\t\t\t\t\t\t\"price_unit\" : que.unit_price,\r\n\t\t\t\t\t\t\t\"account_id\": res_que,\r\n\t\t\t\t\t\t\t# 'invoice_line_tax_ids': self.invoice_line_tax_ids.ids\r\n\t\t\t\t\t\t})]\r\n\t\t\t\t\tres.update({\"invoice_line_ids\": line_data})\r\n\t\t\t\telse:\r\n\t\t\t\t\traise UserError(_('Category Stock Valuation pada Product tidak boleh kosong'))\r\n\t\t\tfor loop in res.jo_id.additional_po_ids:\r\n\t\t\t\tres_additional = self.env['account.account'].search([('name','like','Persediaan Lainnya')])\r\n\t\t\t\tif res_additional:\r\n\t\t\t\t\tline_data = [(0,0,{\r\n\t\t\t\t\t\t\t# \"product_id\": que.product_id.id,\r\n\t\t\t\t\t\t\t\"name\": loop.name,\r\n\t\t\t\t\t\t\t\"quantity\": 1,\r\n\t\t\t\t\t\t\t\"price_unit\" : loop.total_price,\r\n\t\t\t\t\t\t\t\"account_id\": res_additional.id\r\n\t\t\t\t\t\t})]\r\n\t\t\t\t\tres.update({\"invoice_line_ids\": line_data})\r\n\t\t\t\telse:\r\n\t\t\t\t\traise UserError(_('Chart of Account pada Product tidak memiliki kategori Persediaan Lainnya'))\r\n\r\nclass AccountInvoiceLine(models.Model):\r\n\t_name = \"account.invoice.line\"\r\n\t_inherit = \"account.invoice.line\"\r\n\r\n\tinvoice_line_tax_ids = fields.Many2many('account.tax', string='Taxes')\r\n", "sub_path": "vit_accounting_invoice/models/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 3232, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.getLogger", "line_number": 4, "usage_type": "call"}, {"api_name": "odoo.models.Model", "line_number": 7, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 7, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 12, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 12, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 13, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 13, "usage_type": "name"}, {"api_name": "odoo.fields.Text", "line_number": 14, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 14, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 15, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 15, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 16, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 16, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 18, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 18, 
"usage_type": "name"}, {"api_name": "odoo.fields.Text", "line_number": 19, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 19, "usage_type": "name"}, {"api_name": "odoo.fields.Text", "line_number": 20, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 20, "usage_type": "name"}, {"api_name": "odoo.api.onchange", "line_number": 23, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 23, "usage_type": "name"}, {"api_name": "odoo.exceptions.UserError", "line_number": 58, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 58, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 71, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 71, "usage_type": "call"}, {"api_name": "odoo.api.onchange", "line_number": 37, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 37, "usage_type": "name"}, {"api_name": "odoo.models.Model", "line_number": 73, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 73, "usage_type": "name"}, {"api_name": "odoo.fields.Many2many", "line_number": 77, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 77, "usage_type": "name"}]} {"seq_id": "617868963", "text": "import sys\nimport pytest\n\nsys.path.append('..')\nfrom portfolio import app, db\n\n@pytest.fixture(scope='module')\ndef client():\n app.testing = True\n client = app.test_client()\n\n # Establish an application context before running the tests.\n ctx = app.app_context()\n ctx.push()\n\n yield client\n\n ctx.pop()\n\n@pytest.fixture(scope='module')\ndef init_database():\n # Create the database and the database table\n db.create_all()\n\n # Insert user data\n #user1 = User(email='patkennedy79@gmail.com', plaintext_password='FlaskIsAwesome')\n #user2 = User(email='kennedyfamilyrecipes@gmail.com', plaintext_password='PaSsWoRd')\n #db.session.add(user1)\n #db.session.add(user2)\n\n # Commit the changes for the users\n #db.session.commit()\n\n yield db # this is where the testing happens!\n\n db.drop_all()\n", "sub_path": "tests/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 834, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "portfolio.app.testing", "line_number": 9, "usage_type": "attribute"}, {"api_name": "portfolio.app", "line_number": 9, "usage_type": "name"}, {"api_name": "portfolio.app.test_client", "line_number": 10, "usage_type": "call"}, {"api_name": "portfolio.app", "line_number": 10, "usage_type": "name"}, {"api_name": "portfolio.app.app_context", "line_number": 13, "usage_type": "call"}, {"api_name": "portfolio.app", "line_number": 13, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 7, "usage_type": "call"}, {"api_name": "portfolio.db.create_all", "line_number": 23, "usage_type": "call"}, {"api_name": "portfolio.db", "line_number": 23, "usage_type": "name"}, {"api_name": "portfolio.db", "line_number": 34, "usage_type": "name"}, {"api_name": "portfolio.db.drop_all", "line_number": 36, "usage_type": "call"}, {"api_name": "portfolio.db", "line_number": 36, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 20, "usage_type": "call"}]} {"seq_id": "317254136", "text": "from tkinter import * # GUI ํ•„์ˆ˜\r\nfrom tkinter import messagebox\r\nfrom tkinter.filedialog import * # ํŒŒ์ผ 
๋Œ€ํ™”์ƒ์ž\r\nfrom tkinter.simpledialog import *\r\n\r\nimport cv2\r\n\r\n## ํ•จ์ˆ˜ ์„ ์–ธ๋ถ€\r\n\r\ndef malloc(row, col, init=0): # value ๊ฐ’์„ ์•ˆ์ ๊ณ  ๋ณด๋‚ด๋ฉด 0 ์žˆ์œผ๋ฉด value ๊ฐ’ ex. mall2(3,4)\r\n # ๋ฐฐ์—ด ์ƒ์„ฑ\r\n retAry = [\r\n [\r\n [init for _ in range(col)] for _ in range(row)\r\n ] for _ in range(RGB)\r\n ]\r\n return retAry\r\n\r\ndef openImg() :\r\n global window, canvas, paper, inImg, outImg, inH, inW, outH, outW\r\n global fileName, inCvImg, outCvImg, RGB, RR, GG, BB\r\n\r\n fileName = askopenfilename(parent=window,\r\n filetypes=(('Color File', '*.png;*.jpg;*,bmp;*.tif'), ('All File', '*.*')))\r\n # File --> CV ๊ฐ์ฒด\r\n inCvImg = cv2.imread(fileName)\r\n # ์ž…๋ ฅ ์˜์ƒ ํฌ๊ธฐ ์•Œ์•„๋‚ด๊ธฐ (์ค‘์š”!)\r\n inH, inW = inCvImg.shape[:2]\r\n # ๋ฉ”๋ชจ๋ฆฌ ํ™•๋ณด\r\n inImg = malloc(inH, inW)\r\n # OpenCV --> ๋ฉ”๋ชจ๋ฆฌ\r\n for i in range(inH):\r\n for k in range(inW):\r\n inImg[BB][i][k] = inCvImg.item(i, k, RR)\r\n inImg[GG][i][k] = inCvImg.item(i, k, GG)\r\n inImg[RR][i][k] = inCvImg.item(i, k, BB)\r\n\r\n # print(inImage[RR][100][100], inImage[GG][100][100], inImage[BB][100][100])\r\n\r\n equal_img()\r\n\r\ndef displayImg() :\r\n global window, canvas, paper, inImg, outImg, inH, inW, outH, outW\r\n global fileName, inCvImg, outCvImg, RGB, RR, GG, BB\r\n\r\n ## ๊ธฐ์กด์— ๊ทธ๋ฆผ์„ ๋ถ™์ธ์ ์ด ์žˆ์œผ๋ฉด ๊ฒŒ์‹œํŒ(canvas) ๋œฏ์–ด๋‚ด๊ธฐ\r\n if (canvas != None) :\r\n canvas.destroy()\r\n\r\n window.geometry(str(outW) + \"x\" + str(outH)) # ๋ฒฝ ํฌ๊ธฐ ์กฐ์ ˆ\r\n canvas = Canvas(window, height=outH, width=outW) # ๊ฒŒ์‹œํŒ ํฌ๊ธฐ ์กฐ์ ˆ\r\n paper = PhotoImage(height=outH, width=outW)\r\n canvas.create_image(\r\n (outW/2, outH/2), # ์ค‘์‹ฌ์  ์ฐพ๊ธฐ\r\n image=paper,\r\n state='normal'\r\n )\r\n\r\n #Python์€ ๋А๋ฆฌ๊ธฐ ๋–„๋ฌธ์— ํ•˜๋‚˜์”ฉ ์ฐ๊ธฐ๋Š” ํž˜๋“ค์–ด์„œ ๋ฉ”๋ชจ๋ฆฌ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ฐ๊ธฐ\r\n rgbString = \"\" # ์ „์ฒด ํŽœ์„ ์ €์žฅํ•จ\r\n for i in range (outH) :\r\n tmpString = \"\" # ๊ฐ ํ•œ ์ค„์˜ ํŽœ\r\n for k in range(outW) :\r\n rr = outImg[RR][i][k]\r\n gg = outImg[GG][i][k]\r\n bb = outImg[BB][i][k]\r\n tmpString += \"#%02x%02x%02x \" % (rr,gg,bb) # ์ œ์ผ ๋’ค ๊ณต๋ฐฑ ํ•œ์นธ\r\n rgbString += '{' + tmpString + '} ' # ์ œ์ผ ๋’ค ๊ณต๋ฐฑ ํ•œ์นธ\r\n\r\n paper.put(rgbString)\r\n canvas.pack()\r\n status.configure(text = '์ด๋ฏธ์ง€์ •๋ณด' + str(outW) + 'x' + str(outH) + ' ' + fileName)\r\n\r\n\r\ndef equal_img() :\r\n global window, canvas, paper, inImg, outImg, inH, inW, outH, outW\r\n global fileName, inCvImg, outCvImg, RGB, RR, GG, BB\r\n if (inImg == None) :\r\n return\r\n\r\n outH = inH; outW = inW\r\n outImg = malloc(outH, outW)\r\n\r\n for rgb in range (RGB) :\r\n for i in range(inH) :\r\n for k in range (inW) :\r\n outImg[rgb][i][k] = inImg[rgb][i][k]\r\n\r\n displayImg()\r\n\r\ndef gray_img() :\r\n global window, canvas, paper, inImg, outImg, inH, inW, outH, outW\r\n global fileName, inCvImg, outCvImg, RGB, RR, GG, BB\r\n if (inImg == None) :\r\n return\r\n\r\n outImg = malloc(outH, outW)\r\n\r\n for i in range(inH) :\r\n for k in range (inW) :\r\n hap = inImg[RR][i][k] + inImg[GG][i][k] + inImg[BB][i][k]\r\n outImg[RR][i][k] = outImg[GG][i][k] = outImg[BB][i][k] = hap/3\r\n\r\n displayImg()\r\n\r\ndef bright_img() :\r\n global window, canvas, paper, inImg, outImg, inH, inW, outH, outW\r\n global fileName, inCvImg, outCvImg, RGB, RR, GG, BB\r\n if (inImg == None) :\r\n return;\r\n\r\n outImg = malloc(outH, outW)\r\n\r\n value = askinteger(\"๋ฐ๊ฒŒ/์–ด๋‘ก๊ฒŒ\", \"๊ฐ’ ์ž…๋ ฅ\", minvalue=-255, maxvalue=255)\r\n\r\n for rgb in range(RGB):\r\n for i in range(inH):\r\n 
for k in range(inW):\r\n if ((inImg[rgb][i][k] + value) > 255) :\r\n outImg[rgb][i][k] = 255\r\n elif ((inImg[rgb][i][k] + value) < 0) :\r\n outImg[rgb][i][k] = 0\r\n else :\r\n outImg[rgb][i][k] = inImg[rgb][i][k] + value\r\n\r\n displayImg()\r\n\r\n## ์ „์—ญ ๋ณ€์ˆ˜๋ถ€\r\nwindow, canvas, paper = None, None, None # ๋ฒฝ, ๊ฒŒ์‹œํŒ, ์ข…์ด\r\ninImg, outImg = None, None # 3์ฐจ์› ๋ฐฐ์—ด\r\ninH, inW, outH, outW = [0] * 4\r\nfileName = None\r\ninCvImg, outCvImg = None, None # OpenCV์šฉ\r\nRGB, RR, GG, BB = 3, 0, 1, 2\r\n\r\n## ๋ฉ”์ธ ์ฝ”๋“œ๋ถ€\r\nwindow = Tk()\r\nwindow.title(\"์˜์ƒ์ฒ˜๋ฆฌ(Python) (Beta 1)\")\r\nwindow.geometry('700x600')\r\nwindow.resizable(width=False, height=False)\r\n\r\n## ์•„๋ž˜ ์ƒํƒœ์ฐฝ\r\nstatus = Label(window, text = \"์ด๋ฏธ์ง€์ •๋ณด:\", bd=1, relief=SUNKEN, anchor=W)\r\nstatus.pack(side=BOTTOM, fill=X)\r\n\r\n## ์œ„ ๋ฉ”๋‰ด๋ฐ”\r\nmainMenu = Menu(window) # ๋ฉ”์ธ ๋ฉ”๋‰ด\r\nwindow.config(menu = mainMenu)\r\n\r\n# ํŒŒ์ผ ๋ฉ”๋‰ด\r\nfileMenu = Menu(mainMenu)\r\nmainMenu.add_cascade(label = 'ํŒŒ์ผ', menu = fileMenu)\r\nfileMenu.add_command(label = '์—ด๊ธฐ', command = openImg)\r\n# fileMenu.add_command(label = '์ €์žฅ', command = saveImg)\r\n# fileMenu.add_separator() # ๋นˆ์ค„\r\n# fileMenu.add_command(label = '์ข…๋ฃŒ', command = None)\r\n#\r\n# # OpenCV ๋ฉ”๋‰ด\r\n# openCvMenu = Menu(mainMenu)\r\n# mainMenu.add_cascade(label = 'OpenCV', menu = openCvMenu)\r\n# openCvMenu.add_command(label = '์—ด๊ธฐ', command = None)\r\n# openCvMenu.add_command(label = '์ €์žฅ', command = None)\r\n#\r\n# ๊ธฐ๋Šฅ ๋ฉ”๋‰ด\r\nfunctionMenu = Menu(mainMenu)\r\nmainMenu.add_cascade(label = '๊ธฐ๋Šฅ', menu = functionMenu)\r\nfunctionMenu.add_command(label = '๋™์ผ์ด๋ฏธ์ง€', command = equal_img)\r\n#\r\n# # ํ™”์†Œ์  ์ฒ˜๋ฆฌ\r\ndotFunctionMenu = Menu(functionMenu)\r\nfunctionMenu.add_cascade(label = 'ํ™”์†Œ์ ์ฒ˜๋ฆฌ', menu = dotFunctionMenu)\r\ndotFunctionMenu.add_command(label = '๋ฐ๊ฒŒ/์–ด๋‘ก๊ฒŒ', command = bright_img)\r\n# dotFunctionMenu.add_command(label = '๊ฐ๋งˆ', command = gamma)\r\n# dotFunctionMenu.add_command(label = '๋ฐ˜์ „', command = reverse_img)\r\ndotFunctionMenu.add_command(label = '๊ทธ๋ ˆ์ด', command = gray_img)\r\n# dotFunctionMenu.add_command(label = '๊ฐ•์กฐ', command = focus)\r\n# dotFunctionMenu.add_command(label = '์ฑ„๋„', command = changeColor_img)\r\n# dotFunctionMenu.add_command(label = 'ํฌ์Šคํ„ฐ๋ผ์ด์ง•', command = posterising)\r\n#\r\n# # ํ™”์†Œ์˜์—ญ ์ฒ˜๋ฆฌ\r\n# rangeFunctionMenu = Menu(functionMenu)\r\n# functionMenu.add_cascade(label = 'ํ™”์†Œ์˜์—ญ์ฒ˜๋ฆฌ', menu = rangeFunctionMenu)\r\n# rangeFunctionMenu.add_command(label = '์ƒคํ”„๋‹', command = sharp_image)\r\n# rangeFunctionMenu.add_command(label = '๊ณ ์ฃผํŒŒ ํ•„ํ„ฐ๋ง', command = hFillter_image)\r\n# rangeFunctionMenu.add_command(label = '๊ฐ€์šฐ์‹œ์•ˆ', command = gaussian_image)\r\n# rangeFunctionMenu.add_command(label = '์ˆ˜์ง์—ฃ์ง€', command = vecticalEdege_image)\r\n# rangeFunctionMenu.add_command(label = '์ˆ˜ํ‰์—ฃ์ง€', command = horizonEdge_image)\r\n# rangeFunctionMenu.add_command(label = '๋ธ”๋Ÿฌ๋ง', command = blurr_image)\r\n# rangeFunctionMenu.add_command(label = '์— ๋ณด์‹ฑ', command = emboss_image)\r\n# rangeFunctionMenu.add_command(label = '์œ ์‚ฌ ์—ฐ์‚ฐ์ž', command = homogen_image)\r\n#\r\n# # ๊ธฐํ•˜ํ•™ ์ฒ˜๋ฆฌ\r\n# moveFunctionMenu = Menu(functionMenu)\r\n# functionMenu.add_cascade(label = '๊ธฐํ•˜ํ•™์ฒ˜๋ฆฌ', menu = moveFunctionMenu)\r\n# moveFunctionMenu.add_command(label = 'ํ™•๋Œ€', command = zoom_in_Img)\r\n# moveFunctionMenu.add_command(label = '์ถ•์†Œ', command = zoom_out_Img)\r\n# 
moveFunctionMenu.add_command(label = '์ด๋™', command = move_Img)\r\n# moveFunctionMenu.add_command(label = '๊ฐ€๋กœ๋ฐ˜์ „', command = Row_Mirror_Img)\r\n# moveFunctionMenu.add_command(label = '์„ธ๋กœ๋ฐ˜์ „', command = High_Mirror_Img)\r\n#\r\n# # ํžˆ์Šคํ† ๊ทธ๋žจ\r\n# histoFunctionMenu = Menu(functionMenu)\r\n# functionMenu.add_cascade(label = 'ํžˆ์Šคํ† ๊ทธ๋žจ', menu = histoFunctionMenu)\r\n# histoFunctionMenu.add_command(label = 'ํ‘œ ์ž‘์„ฑํ•˜๊ธฐ', command = draw_Histogram)\r\n# histoFunctionMenu.add_command(label = '์•ค๋“œ์ธ', command = histo_Endin_img)\r\n# histoFunctionMenu.add_command(label = '์ŠคํŠธ๋ ˆ์น˜', command = histo_Strech_img)\r\n# histoFunctionMenu.add_command(label = 'ํ‰ํ™œํ™”', command = equalized_img)\r\n\r\nwindow.mainloop()", "sub_path": "Day1_05 ์˜์ƒ์ฒ˜๋ฆฌ with Python (Beta1).py", "file_name": "Day1_05 ์˜์ƒ์ฒ˜๋ฆฌ with Python (Beta1).py", "file_ext": "py", "file_size_in_byte": 8127, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "cv2.imread", "line_number": 26, "usage_type": "call"}]} {"seq_id": "472521665", "text": "import time\nimport threading\nimport redis\n\nconn = redis.Redis(host=\"127.0.0.1\", port=\"6379\", db=0)\n\ndef publisher(n: int):\n time.sleep(1)\n for i in range(n):\n conn.publish(\"channel\", i+2)\n time.sleep(1)\n\n\n\ndef run_pubsub():\n t = threading.Thread(target=publisher, args=(3,))\n t.start()\n pubsub = conn.pubsub()\n pubsub.subscribe([\"channel\"])\n count = 0\n for item in pubsub.listen():\n print(item)\n count += 1\n if count == 4:\n pubsub.unsubscribe()\n if count == 5:\n break\nrun_pubsub()", "sub_path": "redis/ๅ‘ๅธƒไธŽ่ฎข้˜….py", "file_name": "ๅ‘ๅธƒไธŽ่ฎข้˜….py", "file_ext": "py", "file_size_in_byte": 568, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "redis.Redis", "line_number": 5, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 8, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 11, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 16, "usage_type": "call"}]} {"seq_id": "374138681", "text": "# coding: utf-8\nfrom K_RockerArm_Reward import *\nimport random\nimport numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\nclass KRockerArm_Softmax(object):\n\t\n\tdef __init__(self, K_count, temperature, walk_count):\n\t\tself.K_count = K_count\n\t\tself.temperature = temperature\n\t\tself.walk_count = walk_count\n\t\tself.r = 0.0\n\t\tself.r_array = np.zeros(walk_count)\n\t\tself.Q = np.zeros(K_count)\n\t\tself.count = np.zeros(K_count)\n\n\tdef select_one_K(self):\n\t\t# ่ฎก็ฎ—ๆฆ‚็އๅˆ†ๅธƒ\n\t\texp_parameter = self.Q / self.temperature\n\t\tmax_exp_parameter = np.max(exp_parameter)\n\t\tnormal_exp_parameter = exp_parameter - max_exp_parameter\n\t\tenergy = np.exp(normal_exp_parameter)\n\t\tpossibility = energy / np.sum(energy)\n\t\t# ๆ นๆฎๆฆ‚็އๅˆ†ๅธƒ๏ผŒ่ฟ”ๅ›ž้šๆœบๅ€ผ\n\t\tK_indexs = np.arange(self.K_count) + 1\n\t\tcustm = stats.rv_discrete(name='custm', values=(K_indexs, possibility))\n\t\treturn custm.rvs()\n\n\t# EE: Exploration-Exploitation ๆŽข็ดขๅˆฉ็”จ\n\tdef EE(self):\n\t\tfor i in range(self.walk_count):\n\t\t\tK_index = self.select_one_K()\n\t\t\tv = K_RockerArm_Reward(K_index)\n\t\t\tvalue_index = K_index - 1\n\t\t\tself.r += v\n\t\t\tself.Q[value_index] = (self.Q[value_index] * self.count[value_index] + v) / (self.count[value_index] + 1)\n\t\t\tself.count[value_index] += 1\n\t\t\tself.r_array[i] = 
self.r / (i + 1.0)\n\ndef test():\n\twalk_count = 3000\n\tgreedy0 = KRockerArm_Softmax(2, 0.001, walk_count)\n\tgreedy0.EE()\n\tgreedy1 = KRockerArm_Softmax(2, 0.01, walk_count)\n\tgreedy1.EE()\n\tgreedy2 = KRockerArm_Softmax(2, 0.1, walk_count)\n\tgreedy2.EE()\n\tgreedy3 = KRockerArm_Softmax(2, 1.0, walk_count)\n\tgreedy3.EE()\n\n\tplt.plot(np.arange(walk_count), greedy0.r_array, label=\"0.001\", color=\"blue\")\n\tplt.plot(np.arange(walk_count), greedy1.r_array, label=\"0.01\", color=\"green\")\n\tplt.plot(np.arange(walk_count), greedy2.r_array, label=\"0.1\", color=\"black\")\n\tplt.plot(np.arange(walk_count), greedy3.r_array, label=\"1.0\", color=\"red\")\n\t# plt.plot(x, z, label=\"$cos(x)$\")\n\tplt.xlabel(\"Time(s)\")\n\tplt.ylabel(\"Average Reward\")\n\tplt.title(\"K Rocker Arm with Softmax\")\n\n\tplt.legend() #ๆ˜พ็คบๅ›พ็คบ\n\tplt.show()\n\nif __name__ == '__main__':\n\ttest()\n", "sub_path": "2016-02-13-ๅผบๅŒ–ๅญฆไน /K_RockerArm/K_RockerArm_Softmax.py", "file_name": "K_RockerArm_Softmax.py", "file_ext": "py", "file_size_in_byte": 2090, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "numpy.zeros", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 27, "usage_type": "call"}, {"api_name": "scipy.stats.rv_discrete", "line_number": 28, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}]} {"seq_id": "381382907", "text": "from 
django.forms import ModelForm, DateTimeField, DateTimeInput\nfrom .models import Event\nfrom django.conf import settings\n\nclass EventForm(ModelForm):\n class Meta:\n model = Event\n time = DateTimeField(required=True, input_formats=settings.DATETIME_INPUT_FORMATS)\n fields = ('title', 'time', 'description',)\n widgets = {\n 'time': DateTimeInput(attrs={'placeholder': 'YYYY-MM-DD HH:MM'})\n }", "sub_path": "events/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 443, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.forms.ModelForm", "line_number": 5, "usage_type": "name"}, {"api_name": "models.Event", "line_number": 7, "usage_type": "name"}, {"api_name": "django.forms.DateTimeField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.settings.DATETIME_INPUT_FORMATS", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 8, "usage_type": "name"}, {"api_name": "django.forms.DateTimeInput", "line_number": 11, "usage_type": "call"}]} {"seq_id": "1342168", "text": "import cv2\n\n\ndef emptyFunction():\n pass\n\n\nthresh = 127\ncap = cv2.VideoCapture(0)\n\nwindowName = 'Object Tracker'\ntrackbarName = 'Chooser'\n\ncv2.namedWindow(windowName)\ncv2.createTrackbar(trackbarName, windowName, 0, 2, emptyFunction)\n\n\nwhile True:\n ret, frame = cap.read()\n button_state = cv2.getTrackbarPos(trackbarName, windowName)\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n if button_state == 0 and thresh < 256:\n thresh = thresh + 1\n if button_state == 2 and thresh > -1:\n thresh = thresh - 1\n\n ret1, output = cv2.threshold(gray_frame, thresh, 255, cv2.THRESH_BINARY)\n print(thresh)\n cv2.imshow(windowName, output)\n if cv2.waitKey(1) == 27:\n break\n\ncv2.destroyAllWindows()", "sub_path": "treshold_trackbar.py", "file_name": "treshold_trackbar.py", "file_ext": "py", "file_size_in_byte": 738, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "cv2.VideoCapture", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 34, "usage_type": "call"}]} {"seq_id": "125067209", "text": "# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n##############################################################################\n\"\"\"api to compare array\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport numpy as np\nimport six\n\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):\n \"\"\"Asserts if some corresponding element of x and y differs too much.\n\n This function can handle both CPU and GPU arrays simultaneously.\n\n Args:\n x: Left-hand-side array.\n y: Right-hand-side array.\n atol (float): Absolute tolerance.\n rtol (float): Relative tolerance.\n verbose (bool): If ``True``, it outputs verbose messages on error.\n\n \"\"\"\n try:\n #logging.warning(\"int8_outputis {} and fp32 output is {} \".format(x, y))\n np.testing.assert_allclose(\n x, y, atol=atol, rtol=rtol, verbose=verbose)\n return True\n except AssertionError as e:\n f = six.StringIO()\n f.write(str(e) + '\\n\\n')\n f.write(\n 'assert_allclose failed: \\n' +\n ' shape: {} {}\\n'.format(x.shape, y.shape) +\n ' dtype: {} {}\\n'.format(x.dtype, y.dtype))\n if x.shape == y.shape:\n xx = x if x.ndim != 0 else x.reshape((1,))\n yy = y if y.ndim != 0 else y.reshape((1,))\n err = np.abs(xx - yy)\n i = np.unravel_index(np.argmax(err), err.shape)\n f.write(\n ' i: {}\\n'.format(i) +\n ' x[i]: {}\\n'.format(xx[i]) +\n ' y[i]: {}\\n'.format(yy[i]) +\n ' err[i]: {}\\n'.format(err[i]))\n opts = np.get_printoptions()\n try:\n np.set_printoptions(threshold=10000)\n f.write('x: ' + np.array2string(x, prefix='x: ') + '\\n')\n f.write('y: ' + np.array2string(y, prefix='y: ') + '\\n')\n finally:\n np.set_printoptions(**opts)\n #raise AssertionError(f.getvalue())\n logging.warning(f.getvalue())\n return False\n\n\n\n\n\ndef assert_compare(x, y, atol=1e-5, method='ALL'):\n \"\"\"method can be MSE, MAE and RMSE\"\"\"\n mae = 0\n mse = 0\n rmse = 0\n result = 0\n if method == 'MAE':\n mae = np.abs(x-y).mean()\n result = mae\n elif method == 'RMSE':\n rmse = np.sqrt(np.square(x - y).mean())\n result = rmse\n #result=np.sqrt(((x - y) ** 2).mean())\n elif method == 'MSE':\n mse = np.square(x - y).mean()\n result = mse\n #result=((x - y) ** 2).mean()\n else:\n mae = np.abs(x-y).mean()\n rmse = np.sqrt(np.square(x - y).mean())\n mse = np.square(x - y).mean()\n\n if result > atol or (method == 'ALL' and (mae > atol or rmse > atol or mse > atol)):\n f = six.StringIO()\n f.write(\n 'assert_compare failed: \\n' +\n ' atol: {} \\n'.format(atol) +\n ' method: {}\\n'.format(method) +\n ' MAE: {}\\n'.format(mae) +\n ' MSE: {}\\n'.format(mse) +\n ' RMSE: {}\\n'.format(rmse) +\n ' shape: {} {}\\n'.format(x.shape, y.shape) +\n ' dtype: {} {}\\n'.format(x.dtype, y.dtype))\n if x.shape == y.shape:\n xx = x if x.ndim != 0 else x.reshape((1,))\n yy = y if y.ndim != 0 else y.reshape((1,))\n err = np.abs(xx - yy)\n i = np.unravel_index(np.argmax(err), err.shape)\n f.write(\n ' i: {}\\n'.format(i) +\n ' x[i]: {}\\n'.format(xx[i]) +\n ' y[i]: {}\\n'.format(yy[i]) +\n ' err[i]: {}\\n'.format(err[i]))\n opts = np.get_printoptions()\n try:\n np.set_printoptions(threshold=10000)\n f.write('x: ' + np.array2string(x, prefix='x: ') + '\\n')\n f.write('y: ' + np.array2string(y, prefix='y: ') + '\\n')\n finally:\n np.set_printoptions(**opts)\n logging.warning(f.getvalue())\n return False\n else:\n return True\n", "sub_path": "detectron/utils/compare.py", "file_name": "compare.py", "file_ext": "py", "file_size_in_byte": 
4662, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.getLogger", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 46, "usage_type": "attribute"}, {"api_name": "six.StringIO", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.get_printoptions", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array2string", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.array2string", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 72, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 101, "usage_type": "call"}, {"api_name": "six.StringIO", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.get_printoptions", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.array2string", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.array2string", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 130, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 131, "usage_type": "call"}]} {"seq_id": "366250343", "text": "\"\"\"\n\n rex.widget.map\n ==============\n\n :copyright: 2015, Prometheus Research, LLC\n\n\"\"\"\n\nfrom cached_property import cached_property\nfrom webob.exc import HTTPUnauthorized, HTTPBadRequest\n\nfrom rex.urlmap import Map\nfrom rex.core import Error, StrVal, MapVal, BoolVal\nfrom rex.web import authorize, confine, PathMask\n\nfrom .validate import WidgetVal, DeferredVal\nfrom .render import render\n\n__all__ = ('MapWidget', 'WidgetRenderer')\n\n\nclass MapWidget(Map):\n \"\"\" Parses an URL mapping record.\"\"\"\n\n fields = [\n ('widget', DeferredVal()),\n ('title', StrVal(), None),\n ('no_chrome', BoolVal(), False),\n ('access', StrVal(), None),\n ]\n\n def __call__(self, spec, path, context):\n access = spec.access or self.package.name\n widget = lambda: spec.widget.resolve(WidgetVal())\n return WidgetRenderer(\n path, widget, access,\n title=spec.title,\n no_chrome=spec.no_chrome)\n\n def override(self, spec, override_spec):\n if override_spec.widget is not None:\n spec = spec.__clone__(widget=override_spec.widget)\n if override_spec.title is 
not None:\n spec = spec.__clone__(title=override_spec.title)\n if override_spec.access is not None:\n spec = spec.__clone__(access=override_spec.access)\n if override_spec.no_chrome != spec.no_chrome:\n spec = spec.__clone__(no_chrome=override_spec.no_chrome)\n return spec\n\n def mask(self, path):\n if path.endswith('/'):\n sub_path = '%s@@/{path:**}' % path\n else:\n sub_path = '%s/@@/{path:**}' % path\n return [\n PathMask(path),\n PathMask(sub_path),\n ]\n\n\ndef match(mask, request):\n try:\n return mask(request.path_info)\n except ValueError:\n return None\n\n\nclass WidgetRenderer(object):\n\n def __init__(self, path, widget, access, title=None, no_chrome=False):\n self.path = path\n self._widget = widget\n self.access = access\n self.title = title\n self.no_chrome = no_chrome\n\n @cached_property\n def widget(self):\n return self._widget()\n\n def validate(self):\n self.widget\n\n def __call__(self, request):\n if not authorize(request, self.access):\n raise HTTPUnauthorized()\n try:\n with confine(request, self):\n own, via_path = self.path\n params = match(own, request)\n if params is not None:\n return render(\n self.widget, request,\n title=self.title,\n no_chrome=self.no_chrome)\n params = match(via_path, request)\n if params is not None:\n return render(\n self.widget, request,\n path=params['path'])\n raise HTTPBadRequest()\n except Error as error:\n return request.get_response(error)\n", "sub_path": "src/rex.widget/src/rex/widget/map.py", "file_name": "map.py", "file_ext": "py", "file_size_in_byte": 3022, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "rex.urlmap.Map", "line_number": 23, "usage_type": "name"}, {"api_name": "validate.DeferredVal", "line_number": 27, "usage_type": "call"}, {"api_name": "rex.core.StrVal", "line_number": 28, "usage_type": "call"}, {"api_name": "rex.core.BoolVal", "line_number": 29, "usage_type": "call"}, {"api_name": "rex.core.StrVal", "line_number": 30, "usage_type": "call"}, {"api_name": "validate.WidgetVal", "line_number": 35, "usage_type": "call"}, {"api_name": "rex.web.PathMask", "line_number": 58, "usage_type": "call"}, {"api_name": "rex.web.PathMask", "line_number": 59, "usage_type": "call"}, {"api_name": "cached_property.cached_property", "line_number": 79, "usage_type": "name"}, {"api_name": "rex.web.authorize", "line_number": 87, "usage_type": "call"}, {"api_name": "webob.exc.HTTPUnauthorized", "line_number": 88, "usage_type": "call"}, {"api_name": "rex.web.confine", "line_number": 90, "usage_type": "call"}, {"api_name": "render.render", "line_number": 94, "usage_type": "call"}, {"api_name": "render.render", "line_number": 100, "usage_type": "call"}, {"api_name": "webob.exc.HTTPBadRequest", "line_number": 103, "usage_type": "call"}, {"api_name": "rex.core.Error", "line_number": 104, "usage_type": "name"}]} {"seq_id": "29376994", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport logging.handlers\n\nclass itsLogger:\n # DEBUG (D) debug log\n # INFO (I) helpful information\n # WARNING (W) warning notice\n # ERROR (E) error, the program exits\n # CRITICAL (C) critical error, abnormal program termination\n\n logger = ''\n streamHandler = ''\n formatStr = '[%(levelname)-8s] %(asctime)s > %(message)s'
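\n # Format tokens (standard logging attributes): %(levelname)-8s pads the\n # level name to eight characters, %(asctime)s is the record timestamp,\n # and %(message)s is the log text.\n\n def __init__(self):\n # Create a logger instance\n self.logger = logging.getLogger()\n # Create a formatter\n #formatter = logging.Formatter(self.formatStr)\n # Create handlers that write logs to a stream and to a file, respectively.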
\n #fileHandler = logging.FileHandler('./myLoggerTest.log')\n self.streamHandler = logging.StreamHandler()\n\n self.setFormatStr(self.formatStr)\n\n # Attach the formatter to each handler.\n #fileHandler.setFormatter(formatter)\n #streamHandler.setFormatter(formatter)\n # Attach the stream handler and the file handler to the logger instance.\n #logger.addHandler(fileHandler)\n #logger.addHandler(streamHandler)\n\n def setFormatStr(self, newFormatStr):\n #self.formatStr = newFormatStr\n formatter = logging.Formatter(newFormatStr)\n self.streamHandler.setFormatter(formatter)\n if self.streamHandler not in self.logger.handlers: self.logger.addHandler(self.streamHandler)  # avoid stacking duplicate handlers on repeated calls\n\n def addFileHandler(self, logFileName):\n self.fileHandler = logging.FileHandler(logFileName)\n\n def addStreamHandler(self):\n self.streamHandler = logging.StreamHandler()\n\n def writeLog(self, logLevel, msg):\n if logLevel.upper() == \"DEBUG\" or logLevel == \"D\":\n self.logger.debug(msg)\n elif logLevel.upper() == \"INFO\" or logLevel == \"I\":\n self.logger.info(msg)\n elif logLevel.upper() == \"WARNING\" or logLevel == \"W\":\n self.logger.warning(msg)\n elif logLevel.upper() == \"ERROR\" or logLevel == \"E\":\n self.logger.error(msg)\n elif logLevel.upper() == \"CRITICAL\" or logLevel == \"C\":\n self.logger.critical(msg)\n else:\n self.logger.warning(msg)\n\n def setLogLevel(self, logLevel=\"WARNING\"):\n if logLevel.upper() == \"DEBUG\" or logLevel == \"D\":\n self.logger.setLevel(logging.DEBUG)\n elif logLevel.upper() == \"INFO\" or logLevel == \"I\":\n self.logger.setLevel(logging.INFO)\n elif logLevel.upper() == \"WARNING\" or logLevel == \"W\":\n self.logger.setLevel(logging.WARNING)\n elif logLevel.upper() == \"ERROR\" or logLevel == \"E\":\n self.logger.setLevel(logging.ERROR)\n elif logLevel.upper() == \"CRITICAL\" or logLevel == \"C\":\n self.logger.setLevel(logging.CRITICAL)\n else:\n self.logger.setLevel(logging.WARNING)\n\n\nif __name__ == \"__main__\":\n logger = itsLogger()\n logger.setLogLevel(\"DEBUG\")\n logger.writeLog(\"DEBUG\", \"Debug log\")\n logger.writeLog(\"INFO\", \"Helpful information\")\n logger.writeLog(\"WARNING\", \"Warning notice\")\n logger.writeLog(\"ERROR\", \"Error, the program exits\")\n logger.writeLog(\"CRITICAL\", \"Critical error, abnormal program termination\")\n #logger.setLogLevel()\n logger.writeLog(\"D\", \"Debug log\")\n logger.writeLog(\"I\", \"Helpful information\")\n logger.writeLog(\"W\", \"Warning notice\")\n logger.writeLog(\"E\", \"Error, the program exits\")\n logger.writeLog(\"C\", \"Critical error, abnormal program termination\")\n logger.setFormatStr('[%(levelname)-8s]>%(message)s')\n logger.writeLog(\"D\", \"Debug log\")\n logger.writeLog(\"I\", \"Helpful information\")\n logger.writeLog(\"W\", \"Warning notice\")\n logger.writeLog(\"E\", \"Error, the program exits\")\n logger.writeLog(\"C\", \"Critical error, abnormal program termination\")\n", "sub_path": "script/itsLogger.py", "file_name": "itsLogger.py", "file_ext": "py", "file_size_in_byte": 3885, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 38, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 
46, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 64, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 66, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 68, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 70, "usage_type": "attribute"}, {"api_name": "logging.CRITICAL", "line_number": 72, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 74, "usage_type": "attribute"}]} {"seq_id": "603761568", "text": "from glob import glob\nfrom os.path import basename\nfrom os.path import splitext\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\ninstall_requires = [\n 'pretty-json',\n 'Pygments',\n 'pygments-json',\n 'pygments-solarized',\n 'requests'\n]\n\nsetup(name='playable',\n version='1.1',\n description='Playout assets tool',\n author='Alan So',\n author_email='alansoandso@gmail.com',\n packages=['playable'],\n include_package_data=True,\n entry_points={'console_scripts': ['playable = playable.tool:command_line_runner', ]},\n install_requires=install_requires\n )\n\n\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 636, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "setuptools.setup", "line_number": 16, "usage_type": "call"}]} {"seq_id": "379424310", "text": "###############################################################################\n# Copyright (c) 2019, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory\n# Written by the Merlin dev team, listed in the CONTRIBUTORS file.\n# \n#\n# LLNL-CODE-797170\n# All rights reserved.\n# This file is part of merlin-spellbook.\n#\n# For details, see https://github.com/LLNL/merlin-spellbook and\n# https://github.com/LLNL/merlin.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n###############################################################################\n\nimport argparse\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\nimport numpy as np\nimport sys\n\nfrom sklearn.ensemble import RandomForestRegressor\n\n\nFOREST_DEFAULTS = {\n 'max_depth': 2,\n 'random_state': 0,\n 'n_estimators': 100,\n}\n\n\ndef load_infile(args):\n with np.load(args.infile) as data:\n if args.X is not None:\n X = stack_arrays(data, args.X) # inputs\n elif \"X\" in data.keys():\n X = data['X']\n else:\n X = data[data.files[0]]\n\n if args.y is not None:\n y = stack_arrays(data, args.y) # outputs\n elif \"y\" in data.keys():\n y = data['y']\n else:\n y = data[data.files[1]]\n return X, y\n\n\ndef random_forest(args):\n forest_args = FOREST_DEFAULTS\n\n regr = RandomForestRegressor(**forest_args)\n X, y = load_infile(args)\n\n n_samples_X = X.shape[0]\n n_samples_y = y.shape[0]\n\n if n_samples_X != n_samples_y:\n raise ValueError(\"n_samples_X != n_samples_y\")\n\n # single feature or sample reshape as appropriate for sklearn\n # (reshape returns a new array, so the result must be assigned back)\n if n_samples_y == 1:\n y = y.reshape((1, -1))\n elif len(y.shape) == 1:\n y = y.reshape((-1, 1))\n\n regr.fit(X, y)\n with open(args.outfile, \"wb\") as f:\n pickle.dump(regr, f)\n\n\ndef stack_arrays(data, delimited_names, delimiter=','):\n stacked = np.vstack([data[name]\n for name in delimited_names.split(delimiter)])\n return stacked.T\n\n\ndef setup_argparse():\n parser = argparse.ArgumentParser(\n description='Use sklearn to make a random forest regressor')\n parser.add_argument('-infile', help='.npz file with X and y data', default='results.npz')\n parser.add_argument('-X', help='variable(s) in infile for the input, defaults to X; can be a comma-delimited list')\n parser.add_argument('-y', help='variable(s) in infile for the output, defaults to y')\n parser.add_argument('-outfile', help='file to pickle the regressor to', default='random_forest_reg.pkl')\n return parser\n\n\ndef main():\n parser = setup_argparse()\n args = parser.parse_args()\n random_forest(args)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n
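\n# Example invocation (hypothetical file and column names, for illustration):\n#   python learn.py -infile results.npz -X x0,x1 -y f -outfile rf.pkl\n", "sub_path": "shared/learn.py", "file_name": "learn.py", "file_ext": "py", "file_size_in_byte": 3790, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "numpy.load", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 71, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 92, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 98, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 114, "usage_type": "call"}]} {"seq_id": "108079873", "text": "from ase.build import fcc111\nfrom ase.calculators.vasp import Vasp\nAu = fcc111(symbol='Au',a=4.15402,size=(2,2,4),vacuum=10)# Using optimized lattice constant to build Au_fcc111 surface with 4 layers of Au \n\n\nAu.pbc = True\ncalc4 = Vasp(xc='PBE', #Level of theory (density functional)\n encut=520, #plane-wave kinetic energy cutoff (convergence test; I usually use about 520)\n kpts=[8,8,1], #number of k-points, e.g. 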
[3,3,3] (convergence test; fewer kpoints are needed if lattice dimension is large)\n prec='Accurate', #accuracy (don't change this)\n sigma=0.01, #don't need to change (smearing width)\n ismear=0, #don't need to change (smearing method)\n isym=0, #disables symmetry\n ivdw=12, #enable van der Waals interactions\n setups='recommended')\n\nAu.set_calculator(calc4)\n\nAu.get_potential_energy()", "sub_path": "Density_Functional_theory/Au_surface/Convergence_test/Au_FCC111_layers/4layers/layers4.py", "file_name": "layers4.py", "file_ext": "py", "file_size_in_byte": 885, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "ase.build.fcc111", "line_number": 3, "usage_type": "call"}, {"api_name": "ase.calculators.vasp.Vasp", "line_number": 7, "usage_type": "call"}]} {"seq_id": "331966992", "text": "import urllib.request\nfrom bs4 import BeautifulSoup as bs\n\nfrom Functions.RandomStringDigits import randomStringDigits\n\n\ndef newsweek():\n nHeadlines=dict()\n newsweek=\"https://www.newsweek.pl/\"\n page= urllib.request.urlopen(newsweek)\n soup = bs(page, features=\"html.parser\")\n # print(soup.prettify())\n\n headlines=soup.find_all(\"h2\", class_=\"artTitle\")\n for headline in headlines:\n # print(headline)\n if headline.string is not None:\n key = randomStringDigits(8)\n value = str(headline.string)\n nHeadlines[key] = value\n return nHeadlines\n", "sub_path": "Scrapers/WebScraperNewsweek.py", "file_name": "WebScraperNewsweek.py", "file_ext": "py", "file_size_in_byte": 605, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "urllib.request.request.urlopen", "line_number": 10, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 10, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 10, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 11, "usage_type": "call"}, {"api_name": "Functions.RandomStringDigits.randomStringDigits", "line_number": 18, "usage_type": "call"}]} {"seq_id": "627953482", "text": "import os\nimport time\nimport argparse\nimport shutil\n\nimport numpy as np\nfrom PIL import Image\nimport tensorflow as tf\n\nimport config\nfrom utils.losses import *\nfrom vgg16 import VGG16\nfrom nets.msgnet import transformer\n\ntf.set_random_seed(2)\nnp.random.seed(2)\n\ndef train(style_img_path, content_img_path, num_epochs, learning_rate, style_size, \n content_size, log_dir, style_loss_weights, content_loss_weights, reg_loss_weight, \n vgg_weights_path, ckpt_dir, log_iter=100, sample_iter=100, content_batch_size=4):\n \n \n iterator = tf.keras.preprocessing.image.DirectoryIterator\n datagen = tf.keras.preprocessing.image.ImageDataGenerator()\n content_iter = iterator(directory=content_img_path, batch_size=content_batch_size, \\\n target_size=(content_size, content_size), image_data_generator=datagen, shuffle=True, seed=2)\n style_iter = iterator(directory=style_img_path, batch_size=1, target_size=\\\n (style_size, style_size), image_data_generator=datagen, seed=2)\n\n total_iteration = num_epochs * content_iter.n // content_batch_size\n \n vgg_weights = np.load(vgg_weights_path)\n\n tf.reset_default_graph()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth=True\n training_graph = tf.Graph()\n \n with training_graph.as_default() as g, tf.Session(config=config) as sess:\n\n s_placeholder = tf.placeholder(name='style', dtype=tf.float32, shape=[1, style_size, style_size, 
3])\n c_placeholder = tf.placeholder(name='content', dtype=tf.float32, shape=[content_batch_size, content_size, content_size, 3])\n \n target_style_features = VGG16(s_placeholder, vgg_weights)\n target_content_features = VGG16(c_placeholder, vgg_weights)\n style_gram = transformer(s_placeholder)\n\n transferred = transformer(c_placeholder, style_gram)\n transferred_features = VGG16(transferred, vgg_weights)\n \n loss = loss_func(target_style_features, target_content_features, transferred_features,\n transferred, style_loss_weights, content_loss_weights, reg_loss_weight)\n \n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n \n loss_summary = tf.summary.scalar('loss', tf.squeeze(loss))\n style_summary = tf.summary.image('style', s_placeholder)\n content_summary = tf.summary.image('content', c_placeholder)\n transferred_summary = tf.summary.image('transferred', transferred)\n image_summary = tf.summary.merge([style_summary, content_summary, transferred_summary])\n\n summary = tf.summary.FileWriter(graph=g, logdir=log_dir)\n\n\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(var_list=tf.trainable_variables())\n\n start = time.time()\n \n for epoch in range(num_epochs):\n i = 0\n content_iter.reset()\n style_iter.reset()\n for c, _ in content_iter:\n if i+1 == (content_iter.n // content_batch_size) :\n break\n \n try:\n s, _ = style_iter.next()\n except StopIteration:\n style_iter.reset()\n s, _ = style_iter.next()\n\n _, cur_loss, cur_loss_summary, cur_image_summary \\\n = sess.run([optimizer, loss, loss_summary, image_summary], feed_dict={s_placeholder: s, c_placeholder: c})\n \n if (i+1) % log_iter == 0:\n print(\"Iteration: {0}, loss: {1}\".format(epoch*content_iter.n // 4 + i+1, cur_loss))\n \n summary.add_summary(cur_loss_summary, epoch*content_iter.n // 4 + i+1)\n \n if (i+1) % sample_iter == 0:\n summary.add_summary(cur_image_summary, epoch*content_iter.n // 4 + i+1)\n\n summary.flush()\n i += 1 \n save_path = os.path.join(ckpt_dir, 'ckpt')\n if not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir, exist_ok=True)\n ckpt_path = saver.save(sess, save_path, write_meta_graph=False, global_step=epoch*content_iter.n // 4 + i+1)\n print(\"Checkpoint saved as: {ckpt_path}\".format(ckpt_path=ckpt_path))\n \n end = time.time()\n\n print(\"Finished {num_iters} iterations in {time} seconds.\".format(num_iters=total_iteration, time=end-start))\n \ndef export_saved_model(style_size, ckpt_dir, export_dir):\n if os.path.exists(export_dir):\n shutil.rmtree(export_dir)\n\n tf.reset_default_graph()\n eval_graph = tf.Graph()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n exporter = tf.saved_model.builder.SavedModelBuilder(export_dir)\n latest_ckpt = tf.train.latest_checkpoint(ckpt_dir)\n\n with eval_graph.as_default() as g, tf.Session(config=config, graph=eval_graph) as sess:\n s_placeholder = tf.placeholder(name='style', dtype=tf.float32, shape=[1, style_size, style_size, 3])\n inputs = tf.placeholder(name='inputs', dtype=tf.float32, shape=[None, None, None, 3])\n \n style_gram_out = tf.identity(transformer(s_placeholder), name='style_gram_out')\n style_gram_in = tf.placeholder(name='style_gram_in', dtype=tf.float32, shape=style_gram_out.shape)\n outputs = tf.identity(transformer(inputs, style_gram_in), name='outputs')\n saver = tf.train.Saver()\n saver.restore(sess, latest_ckpt)\n\n exporter.add_meta_graph_and_variables(\n sess,\n tags=[tf.saved_model.tag_constants.SERVING],\n signature_def_map={\n 
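# Note: the gram-matrix pass and the stylization pass share this single serving signature, so a caller can run style_gram_out once and reuse the cached gram via style_gram_in on later calls.\n 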
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n tf.saved_model.signature_def_utils.predict_signature_def(inputs={'inputs': inputs,\n 'style': s_placeholder, \n 'style_gram_in':style_gram_in},\n outputs={'outputs': outputs, \n 'style_gram_out': style_gram_out})\n\n })\n \n exporter.save()\n\ndef eval_with_saved_model(saved_model_dir, style_img, content_img, style_size):\n style_img = tf.keras.preprocessing.image.img_to_array(img=tf.keras.preprocessing.image.load_img(style_img, target_size=(style_size, style_size)))\n content_img = tf.keras.preprocessing.image.img_to_array(img=tf.keras.preprocessing.image.load_img(content_img))\n\n tf.reset_default_graph()\n eval_graph = tf.Graph()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth=True\n\n with eval_graph.as_default() as g, tf.Session(config=config, graph=eval_graph) as sess:\n tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], saved_model_dir)\n style = g.get_tensor_by_name('style:0')\n inputs = g.get_tensor_by_name('inputs:0')\n outputs = g.get_tensor_by_name('outputs:0')\n style_gram_out = g.get_tensor_by_name('style_gram_out:0')\n style_gram_in = g.get_tensor_by_name('style_gram_in:0')\n\n c, s = sess.run([tf.expand_dims(content_img, axis=0), tf.expand_dims(style_img, axis=0)])\n style_gram = sess.run(style_gram_out, feed_dict={style:s})\n start = time.time()\n result = sess.run(tf.squeeze(outputs), feed_dict={inputs:c, style_gram_in:style_gram})\n end = time.time()\n print('Inference time: {time} seconds'.format(time=end-start))\n Image.fromarray(result.astype('uint8')).save('imgs/result/msgnet_result.jpg')\n\ndef main(args):\n if args.train:\n train(config.STYLE_IMG_PATH,\n config.CONTENT_IMG_PATH,\n num_epochs=config.epochs,\n learning_rate=config.learning_rate,\n style_size=config.style_img_size,\n content_size=config.content_img_size,\n log_dir=config.log_dir,\n style_loss_weights=config.style_loss_weights,\n content_loss_weights=config.content_loss_weights,\n reg_loss_weight=config.reg_loss_weight,\n vgg_weights_path='/home/ubuntu/weights/vgg16_weights.npz', \n ckpt_dir=config.ckpt_dir)\n else:\n export_saved_model(config.style_img_size, './ckpt/msgnet', './saved_model/msgnet')\n eval_with_saved_model('./saved_model/msgnet', config.STYLE_IMG, config.CONTENT_IMG, config.style_img_size)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--train', default=False)\n args = parser.parse_args()\n main(args)\n", "sub_path": "train_msgnet.py", "file_name": "train_msgnet.py", "file_ext": "py", "file_size_in_byte": 8717, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "tensorflow.set_random_seed", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.preprocessing.image.ImageDataGenerator", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.reset_default_graph", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 35, "usage_type": "call"}, {"api_name": "config.gpu_options", "line_number": 36, "usage_type": "attribute"}, 
{"api_name": "tensorflow.Graph", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 42, "usage_type": "attribute"}, {"api_name": "vgg16.VGG16", "line_number": 44, "usage_type": "call"}, {"api_name": "vgg16.VGG16", "line_number": 45, "usage_type": "call"}, {"api_name": "nets.msgnet.transformer", "line_number": 46, "usage_type": "call"}, {"api_name": "nets.msgnet.transformer", "line_number": 48, "usage_type": "call"}, {"api_name": "vgg16.VGG16", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tensorflow.squeeze", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.summary.image", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.image", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.image", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.merge", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.trainable_variables", "line_number": 66, "usage_type": "call"}, {"api_name": "time.time", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 99, "usage_type": "call"}, {"api_name": "time.time", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.reset_default_graph", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.Graph", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 113, "usage_type": "call"}, {"api_name": "config.gpu_options", "line_number": 114, "usage_type": "attribute"}, {"api_name": "tensorflow.saved_model.builder.SavedModelBuilder", 
"line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.saved_model", "line_number": 115, "usage_type": "attribute"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 116, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 118, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 119, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 120, "usage_type": "attribute"}, {"api_name": "tensorflow.identity", "line_number": 122, "usage_type": "call"}, {"api_name": "nets.msgnet.transformer", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 123, "usage_type": "attribute"}, {"api_name": "tensorflow.identity", "line_number": 124, "usage_type": "call"}, {"api_name": "nets.msgnet.transformer", "line_number": 124, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 125, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 125, "usage_type": "attribute"}, {"api_name": "tensorflow.saved_model", "line_number": 130, "usage_type": "attribute"}, {"api_name": "tensorflow.saved_model", "line_number": 132, "usage_type": "attribute"}, {"api_name": "tensorflow.saved_model.signature_def_utils.predict_signature_def", "line_number": 133, "usage_type": "call"}, {"api_name": "tensorflow.saved_model", "line_number": 133, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.preprocessing.image.img_to_array", "line_number": 144, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 144, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.preprocessing.image.load_img", "line_number": 144, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing.image.img_to_array", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 145, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.preprocessing.image.load_img", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.reset_default_graph", "line_number": 147, "usage_type": "call"}, {"api_name": "tensorflow.Graph", "line_number": 148, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 149, "usage_type": "call"}, {"api_name": "config.gpu_options", "line_number": 150, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 152, "usage_type": "call"}, {"api_name": "tensorflow.saved_model.loader.load", "line_number": 153, "usage_type": "call"}, {"api_name": "tensorflow.saved_model", "line_number": 153, "usage_type": "attribute"}, {"api_name": "tensorflow.expand_dims", "line_number": 160, "usage_type": "call"}, {"api_name": "time.time", "line_number": 162, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 163, "usage_type": "call"}, {"api_name": "time.time", "line_number": 164, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 166, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 166, "usage_type": "name"}, {"api_name": "config.STYLE_IMG_PATH", "line_number": 170, "usage_type": "attribute"}, {"api_name": "config.CONTENT_IMG_PATH", "line_number": 171, "usage_type": "attribute"}, 
{"api_name": "config.epochs", "line_number": 172, "usage_type": "attribute"}, {"api_name": "config.learning_rate", "line_number": 173, "usage_type": "attribute"}, {"api_name": "config.style_img_size", "line_number": 174, "usage_type": "attribute"}, {"api_name": "config.content_img_size", "line_number": 175, "usage_type": "attribute"}, {"api_name": "config.log_dir", "line_number": 176, "usage_type": "attribute"}, {"api_name": "config.style_loss_weights", "line_number": 177, "usage_type": "attribute"}, {"api_name": "config.content_loss_weights", "line_number": 178, "usage_type": "attribute"}, {"api_name": "config.reg_loss_weight", "line_number": 179, "usage_type": "attribute"}, {"api_name": "config.ckpt_dir", "line_number": 181, "usage_type": "attribute"}, {"api_name": "config.style_img_size", "line_number": 183, "usage_type": "attribute"}, {"api_name": "config.STYLE_IMG", "line_number": 184, "usage_type": "attribute"}, {"api_name": "config.CONTENT_IMG", "line_number": 184, "usage_type": "attribute"}, {"api_name": "config.style_img_size", "line_number": 184, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 187, "usage_type": "call"}]} {"seq_id": "84731487", "text": "# -*- coding:utf-8 -*-\nimport numpy as np\nimport librosa\n\ndef to_mfcc(file, n_mfcc=12, rate=16000):\n ## -----*----- Convert the audio data to MFCCs -----*----- ##\n x, fs = librosa.load(file, sr=rate)\n mfcc = librosa.feature.mfcc(x, sr=fs, n_mfcc=n_mfcc)\n mfcc = np.reshape(mfcc, (mfcc.shape[0], mfcc.shape[1], 1))\n return np.array(mfcc, dtype=np.float32)\n\n\nif __name__ == '__main__':\n wav_file = 'your .wav file path'\n to_mfcc(wav_file)\n", "sub_path": "to_mfcc.py", "file_name": "to_mfcc.py", "file_ext": "py", "file_size_in_byte": 458, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "librosa.load", "line_number": 7, "usage_type": "call"}, {"api_name": "librosa.feature.mfcc", "line_number": 8, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 10, "usage_type": "attribute"}]} {"seq_id": "102687819", "text": "from sklearn.metrics.pairwise import cosine_similarity\n\npos2pos = {\"ADJ\":\"a\", \"ADV\":\"r\", \"NOUN\":\"n\", \"VERB\":\"v\"}\n\ndef get_lemma2syn(f_dictionary):\n dictionary = open(f_dictionary, \"r\").readlines()\n lemma2syn = {}\n for line in dictionary:\n fields = line.strip().split(\" \")\n lemma, synsets = fields[0], [syn[:10] for syn in fields[1:]]\n lemma2syn[lemma] = synsets\n return lemma2syn\n
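\n# The dictionary file appears to hold one lemma per line followed by space-\n# separated synset IDs; only the first ten characters of each ID are kept,\n# i.e. strings shaped like 02084071-n (an 8-digit offset, a dash, a POS letter).\n\ndef calculate_accuracy(outs, gold_synsets, lemmas, pos_filters, embeddings, dictionary):\n lemma2syn = get_lemma2syn(dictionary)\n count_correct = 0\n count_all = 0\n unavailable_syn_emb = set()\n unavailable_syn_cases = 0\n # out.write(\"Lemma\\tSelected synset\\tGold synset\\tDistance\\n\")\n for count, gold in enumerate(gold_synsets):\n # if using the test data, use \"test_lemmas\", otherwise use \"train_lemmas\"\n lemma = lemmas[count]\n pos = pos_filters[count]\n if lemma in lemma2syn:\n possible_syns = lemma2syn[lemma]\n else:\n count_all += 1\n continue\n output = outs[count]\n max_sim = -10000.0\n selected_syn = \"\"\n for syn in possible_syns:\n if pos2pos[pos] != syn.split(\"-\")[1]:\n continue\n if syn in embeddings:\n cos_sim = 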
cosine_similarity(output.reshape(1,-1), embeddings[syn].reshape(1,-1))[0][0]\n else:\n unavailable_syn_cases += 1\n unavailable_syn_emb.add(syn)\n cos_sim = 0.0\n if cos_sim > max_sim:\n max_sim = cos_sim\n selected_syn = syn\n # gold_cos_sim = cosine_similarity(output.reshape(1,-1), embeddings[gold].reshape(1,-1))[0][0]\n # line_to_write = lemma + \"\\t\" + selected_syn + \"\\t\" + gold + \"\\t\" + str(max_sim - gold_cos_sim) + \"\\n\"\n # out.write(line_to_write)\n if selected_syn in gold:\n count_correct += 1\n count_all += 1\n return count_correct, count_all", "sub_path": "calc_accuracy.py", "file_name": "calc_accuracy.py", "file_ext": "py", "file_size_in_byte": 1984, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 37, "usage_type": "call"}]} {"seq_id": "330604422", "text": "from django.conf.urls import url\nfrom Inventario.views import *\nfrom django.views.decorators.csrf import csrf_exempt\nurlpatterns = [\n # ------------- urls base --------------------------------------------\n url(r'^insumo/$',InsumoList.as_view(), name='InsumoList'),\n url(r'^insumoupdate/(?P[\\w\\s]+)/$',InsumoUpdate.as_view(), name='InsumoUpdate'),\n url(r'^insumodelete/(?P[\\w\\s]+)/$',InsumoDelete.as_view(), name='InsumoDelete'),\n\n url(r'^tipoinsumo/$',TipoInsumoList.as_view(), name='TipoInsumoList'),\n url(r'^tipoinsumoupdate/(?P[\\w\\s]+)/$',TipoInsumoUpdate.as_view(), name='TipoInsumoUpdate'),\n url(r'^tipoinsumoDelete/(?P[\\w\\s]+)/$',TipoInsumoDelete.as_view(), name='TipoInsumoDelete'),\n\n url(r'^producto/$',ProductoList.as_view(), name='ProductoList'),\n url(r'^productoupdate/(?P[\\w\\s]+)/$',ProductoUpdate.as_view(), name='ProductoUpdate'),\n url(r'^productodelete/(?P[\\w\\s]+)/$',ProductoDelete.as_view(), name='ProductoDelete'),\n\n url(r'^promocion/$',PromocionList.as_view(), name='PromocionList'),\n url(r'^promocionupdate/(?P[\\w\\s]+)/$',PromocionUpdate.as_view(), name='PromocionUpdate'),\n url(r'^promociondelete/(?P[\\w\\s]+)/$',PromocionDelete.as_view(), name='PromocionDelete'),\n\n url(r'^promocionproducto/$',PromocionProductoList.as_view(), name='PromocionProductoDetail'),\n # ----------- fin urls base ------------------------------------------- \n url(r'^insumo/(?P\\d+)$',InsumoDetail, name='InsumoDetail'),\n url(r'^promocion/(?P\\d+)$',PromocionDetail, name='PromocionDetail'),\n\n]", "sub_path": "Inventario/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1614, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": 
"django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}]} {"seq_id": "355156369", "text": "import re\n\nfrom typing import OrderedDict\nimport _strptime # NOQA fixes _strptime deferred import issue\n\nfrom snuba.query.parsing import ParsingContext\nfrom snuba.query.query import Query\nfrom snuba.query.schema import POSITIVE_OPERATORS\nfrom snuba.util import (\n alias_expr,\n escape_alias,\n escape_literal,\n function_expr,\n is_condition,\n is_function,\n QUOTED_LITERAL_RE,\n)\n\nQUALIFIED_COLUMN_REGEX = re.compile(r\"^([a-zA-Z_][a-zA-Z0-9_]*)\\.([a-zA-Z0-9_\\.\\[\\]]+)$\")\n\n\nclass InvalidConditionException(Exception):\n pass\n\n\ndef column_expr(dataset, column_name, query: Query, parsing_context: ParsingContext, alias=None, aggregate=None):\n \"\"\"\n Certain special column names expand into more complex expressions. Return\n a 2-tuple of:\n (expanded column expression, sanitized alias)\n\n Needs the body of the request for some extra data used to expand column expressions.\n \"\"\"\n assert column_name or aggregate\n assert not aggregate or (aggregate and (column_name or alias))\n column_name = column_name or ''\n\n if is_function(column_name, 0):\n return complex_column_expr(dataset, column_name, query, parsing_context)\n elif isinstance(column_name, (list, tuple)) and aggregate:\n return complex_column_expr(dataset, [aggregate, column_name, alias], query, parsing_context)\n elif isinstance(column_name, str) and QUOTED_LITERAL_RE.match(column_name):\n return escape_literal(column_name[1:-1])\n else:\n expr = dataset.column_expr(column_name, query, parsing_context)\n\n if aggregate:\n expr = function_expr(aggregate, expr)\n\n alias = escape_alias(alias or column_name)\n return alias_expr(expr, alias, parsing_context)\n\n\ndef complex_column_expr(dataset, expr, query: Query, parsing_context: ParsingContext, depth=0):\n function_tuple = is_function(expr, depth)\n if function_tuple is None:\n raise ValueError('complex_column_expr was given an expr %s that is not a function at depth %d.' % (expr, depth))\n\n name, args, alias = function_tuple\n out = []\n i = 0\n while i < len(args):\n next_2 = args[i:i + 2]\n if is_function(next_2, depth + 1):\n out.append(complex_column_expr(dataset, next_2, query, parsing_context, depth + 1))\n i += 2\n else:\n nxt = args[i]\n if is_function(nxt, depth + 1): # Embedded function\n out.append(complex_column_expr(dataset, nxt, query, parsing_context, depth + 1))\n elif isinstance(nxt, str):\n out.append(column_expr(dataset, nxt, query, parsing_context))\n else:\n out.append(escape_literal(nxt))\n i += 1\n\n ret = function_expr(name, ', '.join(out))\n if alias:\n ret = alias_expr(ret, alias, parsing_context)\n return ret\n\n\ndef conditions_expr(dataset, conditions, query: Query, parsing_context: ParsingContext, depth=0):\n \"\"\"\n Return a boolean expression suitable for putting in the WHERE clause of the\n query. 
The expression is constructed by ANDing groups of OR expressions.\n Expansion of columns is handled, as is replacement of columns with aliases,\n if the column has already been expanded and aliased elsewhere.
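\n\n For example (an illustrative reading of the nesting): the conditions\n [[['a', '=', 1], ['b', '=', 2]], ['c', '=', 3]]\n render roughly as (a = 1 OR b = 2) AND c = 3, after column expansion\n and literal escaping.\n \"\"\"\n from snuba.clickhouse.columns import Array\n\n if not conditions:\n return ''\n\n if depth == 0:\n # dedupe conditions at top level, but keep them in order\n sub = OrderedDict((conditions_expr(dataset, cond, query, parsing_context, depth + 1), None) for cond in conditions)\n return u' AND '.join(s for s in sub.keys() if s)\n elif is_condition(conditions):\n lhs, op, lit = dataset.process_condition(conditions)\n\n # facilitate deduping IN conditions by sorting them.\n if op in ('IN', 'NOT IN') and isinstance(lit, tuple):\n lit = tuple(sorted(lit))\n\n # If the LHS is a simple column name that refers to an array column\n # (and we are not arrayJoining on that column, which would make it\n # scalar again) and the RHS is a scalar value, we assume that the user\n # actually means to check if any (or all) items in the array match the\n # predicate, so we return an `any(x == value for x in array_column)`\n # type expression. We assume that operators looking for a specific value\n # (IN, =, LIKE) are looking for rows where any array value matches, and\n # exclusionary operators (NOT IN, NOT LIKE, !=) are looking for rows\n # where all elements match (eg. all NOT LIKE 'foo').\n columns = dataset.get_dataset_schemas().get_read_schema().get_columns()\n if (\n isinstance(lhs, str) and\n lhs in columns and\n isinstance(columns[lhs].type, Array) and\n columns[lhs].base_name != query.get_arrayjoin() and\n not isinstance(lit, (list, tuple))\n ):\n any_or_all = 'arrayExists' if op in POSITIVE_OPERATORS else 'arrayAll'\n return u'{}(x -> assumeNotNull(x {} {}), {})'.format(\n any_or_all,\n op,\n escape_literal(lit),\n column_expr(dataset, lhs, query, parsing_context)\n )\n else:\n return u'{} {} {}'.format(\n column_expr(dataset, lhs, query, parsing_context),\n op,\n escape_literal(lit)\n )\n\n elif depth == 1:\n sub = (conditions_expr(dataset, cond, query, parsing_context, depth + 1) for cond in conditions)\n sub = [s for s in sub if s]\n res = u' OR '.join(sub)\n return u'({})'.format(res) if len(sub) > 1 else res\n else:\n raise InvalidConditionException(str(conditions))\n", "sub_path": "snuba/query/columns.py", "file_name": "columns.py", "file_ext": "py", "file_size_in_byte": 5700, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "re.compile", "line_number": 19, "usage_type": "call"}, {"api_name": "snuba.query.query.Query", "line_number": 26, "usage_type": "name"}, {"api_name": "snuba.query.parsing.ParsingContext", "line_number": 26, "usage_type": "name"}, {"api_name": "snuba.util.is_function", "line_number": 38, "usage_type": "call"}, {"api_name": "snuba.util.QUOTED_LITERAL_RE.match", "line_number": 42, "usage_type": "call"}, {"api_name": "snuba.util.QUOTED_LITERAL_RE", "line_number": 42, "usage_type": "name"}, {"api_name": "snuba.util.escape_literal", "line_number": 43, "usage_type": "call"}, {"api_name": "snuba.util.function_expr", "line_number": 48, "usage_type": "call"}, {"api_name": "snuba.util.escape_alias", "line_number": 50, "usage_type": "call"}, {"api_name": "snuba.util.alias_expr", "line_number": 51, "usage_type": "call"}, {"api_name": "snuba.query.query.Query", "line_number": 54, "usage_type": "name"}, {"api_name": "snuba.query.parsing.ParsingContext", "line_number": 54, "usage_type": "name"}, 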
{"api_name": "snuba.util.is_function", "line_number": 55, "usage_type": "call"}, {"api_name": "snuba.util.is_function", "line_number": 64, "usage_type": "call"}, {"api_name": "snuba.util.is_function", "line_number": 69, "usage_type": "call"}, {"api_name": "snuba.util.escape_literal", "line_number": 74, "usage_type": "call"}, {"api_name": "snuba.util.function_expr", "line_number": 77, "usage_type": "call"}, {"api_name": "snuba.util.alias_expr", "line_number": 79, "usage_type": "call"}, {"api_name": "snuba.query.query.Query", "line_number": 83, "usage_type": "name"}, {"api_name": "snuba.query.parsing.ParsingContext", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.OrderedDict", "line_number": 97, "usage_type": "call"}, {"api_name": "snuba.util.is_condition", "line_number": 99, "usage_type": "call"}, {"api_name": "snuba.clickhouse.columns.Array", "line_number": 119, "usage_type": "argument"}, {"api_name": "snuba.query.schema.POSITIVE_OPERATORS", "line_number": 123, "usage_type": "name"}, {"api_name": "snuba.util.escape_literal", "line_number": 127, "usage_type": "call"}, {"api_name": "snuba.util.escape_literal", "line_number": 134, "usage_type": "call"}]} {"seq_id": "3085936", "text": "import torch\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\nfrom sklearn.metrics import jaccard_similarity_score\nfrom sklearn.metrics import roc_curve, auc, precision_recall_curve, f1_score, average_precision_score\nfrom sklearn.preprocessing import label_binarize\nfrom scipy import interp\nimport numpy as np\nfrom PIL import Image\nimport os\n\nfrom losses import DiceLossMultiClass\nfrom load import DataLoad \nnp.set_printoptions(threshold=np.nan)\n\nclass Metric(object):\n \"\"\"\n Various metrics, including the dice coefficient, for individual examples. \n This class currently does not take multi-class into account or binary one-hot vectors \n for that matter. We need to change it as soon as possible.\n \"\"\"\n def __init__(self, compute_jaccard=False, device=\"cuda\"):\n print(\"Initiated Metric Evaluation\")\n self.compute_jaccard = compute_jaccard\n self.device = device\n \n def dice(self, input, target):\n ''' \n Given an input and target compute the dice score\n between both. Dice = 2 * |A ∩ B| / (|A| + |B|)\n '''\n eps = 1e-6\n if len(input.shape) > 1:\n input, target = input.view(-1), target.view(-1)\n else:\n input, target = torch.tensor(input.astype(float)), torch.tensor(target.astype(float))\n inter = torch.dot(input, target)\n union = torch.sum(input) + torch.sum(target) + eps\n dice = (2 * inter.float() + eps) / union.float()\n return dice\n\n def pixel_wise(self, input, target):\n \"\"\"\n Regular pixel-wise accuracy metric: we count the matching\n pixels and divide by the total number of pixels.\n \"\"\"\n # Flatten the matrices to make it comparable\n input = input.view(-1)\n target = target.view(-1)\n correct = torch.sum(input==target)\n return (correct.item() / len(input))\n \n def jaccard(self, prediction, mask):\n \"\"\"\n Given a one-dimensional prediction vector and a\n one-dimensional mask vector, compute the jaccard\n similarity metric using SciKit-Learn; the vectors\n are required to be numpy vectors. 
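Note that for binary\n label vectors sklearn's jaccard_similarity_score behaves like plain\n per-element accuracy, so its value is directly comparable to pixel_wise.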
\n \n TODO: Implement torch version\n \"\"\"\n return jaccard_similarity_score(prediction, mask)\n \n def evaluate(self, net, dataset, t=0):\n \"\"\"\n Evaluation without the densecrf (prediction.py; we are not using\n that method, since eval_net suffices for predicting the images and for debugging), \n with the dice coefficient or some other metric.\n \"\"\"\n # Tell pytorch we are evaluating\n net.eval()\n total_accuracy, total_dice, total_jaccard = 0, 0, 0\n # To make sure we do not get memory errors\n epoch_loss, h = 0, 0\n with torch.no_grad():\n for i, batch in enumerate(dataset):\n img, true_mask = batch\n # send both to the gpu for evaluation\n img = torch.from_numpy(img).unsqueeze(0).to(self.device)\n true_mask = torch.from_numpy(true_mask).unsqueeze(0).to(self.device)\n # Make sure we are using binary values\n true_mask = (true_mask > 0).float()\n # The model returns logits, so we need it to pass through\n # a sigmoid to get the probabilities\n mask_pred = torch.sigmoid(net(img))\n # threshold the predictions to get the hard per pixel classification\n # for visualization, the probabilities will do just fine\n mask_pred_s = (mask_pred > 0.5).float()\n # beware that, for the moment, we need to pass it the \n # hard per pixel classification\n model_accuracy = self.pixel_wise(mask_pred_s, true_mask)\n dice = self.dice(mask_pred_s,true_mask)\n jaccard = None\n if self.compute_jaccard: \n mask_squashed = mask_pred_s.view(-1)\n true_mask_squashed = true_mask.view(-1)\n jaccard = self.jaccard(mask_squashed.cpu().numpy(), true_mask_squashed.cpu().numpy())\n total_jaccard += jaccard\n \n total_accuracy += model_accuracy\n total_dice += dice.item()\n print(dice.item())\n self.save_images(img, mask_pred, true_mask, False, t)\n h += 1\n \n if not self.compute_jaccard:\n return total_dice / max(h, 1), total_accuracy / max(h, 1)\n return total_dice / max(h, 1), total_accuracy / max(h, 1), total_jaccard / max(h, 1)\n \n def compute_accuracy(self, prediction, mask):\n thresholded = (prediction > 0.5).float()\n pixel_accuracy = self.pixel_wise(thresholded, mask)\n dice_score = self.dice(thresholded, mask)\n return pixel_accuracy, dice_score\n \n def plot_roc(self, predictions, true_values):\n \"\"\"\n Computes the ROC Curve and computes the Area Under the Curve (AUC). 
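The reported\n optimal cutoff is the threshold that maximizes Youden's J statistic\n (tpr - fpr).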
\n Also used to estimate the best threshold, given that we need to minimize \n false negatives.\n Input: predictions of shape (N,)\n true_values of shape (N,)\n \"\"\"\n fpr, tpr = dict(), dict()\n true_values = true_values.cpu().detach().numpy()\n predictions = predictions.cpu().detach().numpy()\n thresholded = (predictions > 0.5)\n acc = np.sum(true_values==thresholded) / true_values.shape[0]\n print(\"accuracy: \", acc)\n dice = self.dice(thresholded, true_values)\n print(\"dice:\",dice)\n fpr, tpr, thresholds = roc_curve(true_values, predictions)\n index_cutoff = np.argmax(tpr - fpr)\n fpr_cutoff, tpr_cutoff = fpr[index_cutoff], tpr[index_cutoff]\n optimal_cutoff = thresholds[np.argmax(tpr - fpr)]\n print(\"fpr at cutoff: \", fpr_cutoff)\n print(\"tpr at cutoff: \", tpr_cutoff)\n print(\"optimal cutoff: \", optimal_cutoff)\n roc_auc = auc(fpr, tpr)\n plt.figure()\n plt.plot(fpr, tpr, color='darkred', lw=2, label=\"ROC Curve (area = %0.2f, F1 = %0.2f )\" % (roc_auc, dice))\n plt.plot([0,1],[0,1], color=\"navy\", lw=2, linestyle=\"--\")\n plt.xlim([0.0,1.0])\n plt.ylim([0.0,1.05])\n plt.title(\"ROC Curve Model Densenet on 512x512 LGD patches\")\n plt.legend(loc=\"lower right\")\n plt.grid()\n plt.savefig(\"roc_five_512.png\",dpi=300)\n return roc_auc\n \n def one_hot_predictions(self, predictions):\n \"\"\"\n Given a pytorch vector with probability predictions,\n convert to a one hot representation, assuming a binary instance.\n \"\"\"\n predictions = predictions.cpu().detach().numpy()\n one_hot = np.zeros((predictions.shape[0],2))\n for i, pred in enumerate(predictions):\n one_hot[i,0] = 1 - pred\n one_hot[i,1] = pred\n return one_hot\n \n \n def save_images(self, tissue, prediction, mask, condition, t):\n ''' \n for visualization purposes, saves the image of the tissue patch\n in question alongside the predicted patch and the mask for the corresponding\n patch.\n ''' \n path= \"/home/bcardenasguevara/Unet_Cleaned/AMLab_Unet/data/predictions\"\n if condition:\n pred = prediction.squeeze().cpu().detach().numpy()\n tissue = tissue.squeeze().cpu().detach().numpy().T\n mask = mask.squeeze().cpu().detach().numpy()\n plt.imsave(os.path.join(path, str(t)+\"pred\"),pred)\n plt.imsave(os.path.join(path, str(t)+\"cell\"),tissue)\n plt.imsave(os.path.join(path, str(t)+\"true\"),mask, cmap=cm.gray)\n \n", "sub_path": "AMLab_AMC/AMC_Unet/metrics.py", "file_name": "metrics.py", "file_ext": "py", "file_size_in_byte": 7692, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "numpy.set_printoptions", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.dot", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.metrics.jaccard_similarity_score", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 130, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 137, "usage_type": "call"}, {"api_name": "sklearn.metrics.auc", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imsave", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path", "line_number": 177, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imsave", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imsave", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "matplotlib.cm.gray", "line_number": 179, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 179, "usage_type": "name"}]} {"seq_id": "582906487", "text": "import os\nimport requests\nfrom ratelimit import limits, sleep_and_retry\n# import ruamel.yaml\nimport json\nimport yaml\n\n\nBIO_TOOLS_API=\"https://bio.tools/api/tool/%s&format=json\"\nBIO_TOOLS_TOOL_API=\"https://bio.tools/api/tool/%s/?format=json\"\n\n\nONE_MINUTE = 60\n\n\n@sleep_and_retry\n@limits(calls=1000, period=ONE_MINUTE)\ndef call_api(url):\n    response = requests.get(url)\n\n    if response.status_code != 200:\n        raise Exception('API response: {}'.format(response.status_code))\n    return response\n\n\nclass MyDumper(yaml.Dumper):\n\n    def increase_indent(self, flow=False, indentless=False):\n        return super(MyDumper, self).increase_indent(flow, False)\n\n\ndef sync_tool(tool):\n    \"\"\"\n    This function 
downloads the specific content for a tool.\n :param tool: tool\n :return:\n \"\"\"\n id = tool['biotoolsID']\n if id is not None:\n if not os.path.exists(id):\n os.mkdir(id)\n tools_url = BIO_TOOLS_TOOL_API.replace(\"%s\", id)\n try:\n response = call_api(tools_url)\n with open(id + '/descriptor.yaml', 'w') as stream:\n json_data = json.loads(response.content.decode('utf-8'))\n yaml.dump(json_data, stream, Dumper=MyDumper, default_flow_style=False)\n print(\"Processing -- \" + id)\n except Exception:\n print(\"Error processing -- \" + id)\n\n\ndef main():\n \"\"\"\n This function loops over all tools in bio.tools and writes each tool's representation to a file.\n \"\"\"\n current_page = \"?page=1\"\n tools_url = BIO_TOOLS_API.replace(\"%s\", current_page)\n response = requests.get(tools_url, headers={\"Content-Type\": \"application/json\"})\n\n data = response.json()\n count = 0\n\n while data['next'] is not None:\n for index in range(len(data['list'])):\n sync_tool(data['list'][index])\n count = count + len(data['list'])\n print(\"Number of tools processed -- \" + str(count))\n\n tools_url = BIO_TOOLS_API.replace(\"%s\", data['next'])\n response = call_api(tools_url)\n data = response.json()\n \n\nif __name__ == \"__main__\":\n main()", "sub_path": "sync_tools.py", "file_name": "sync_tools.py", "file_ext": "py", "file_size_in_byte": 2128, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "ratelimit.sleep_and_retry", "line_number": 16, "usage_type": "name"}, {"api_name": "ratelimit.limits", "line_number": 17, "usage_type": "call"}, {"api_name": "yaml.Dumper", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 41, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 46, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 47, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 59, "usage_type": "call"}]} {"seq_id": "336111984", "text": "import zlib\nimport os\nimport sys\nimport string\nimport shutil\nimport glob\nimport base64\nimport subprocess\nimport platform\n\nfrom pathlib import Path\nfrom ast import literal_eval\nfrom time import time, sleep\nfrom hashlib import md5\n\n_DEBUG = False\nPLAT_QUOTE = '\"' if platform.system() == \"Windows\" else \"'\"\nITEM_SEPARATOR = \"\\t\"\n\ncurPath = Path(__file__).resolve().parent\ncurPlatform = platform.system()\n\nif curPath.name == \"source\":\n curPath = curPath.parent / \"launcher\"\n os.chdir(curPath.as_posix())\n \ndef debugMsg(*args, waitInput=True):\n if _DEBUG:\n finalMsg = \"\"\n for arg in args: finalMsg += str(arg)\n print(finalMsg)\n if waitInput: input(\"Press any key to continue...\")\n\ndef loadConfig(path):\n config = None\n configPath = path / \"config.json\"\n engineCandidates = [curPlatform, curPlatform + \"32\", curPlatform + \"64\"]\n enginePath = None\n \n for _path in engineCandidates:\n _path = path / (_path + \"/engine_executable.txt\")\n if _path.exists():\n enginePath = _path.resolve()\n break\n \n if configPath.exists() and enginePath is not None:\n with open(configPath.as_posix(), \"r\") as sourceFile:\n config = literal_eval(sourceFile.read())\n print(\"> Read config from\", configPath.as_posix())\n \n if enginePath.exists():\n with 
open(enginePath.as_posix(), \"r\") as sourceFile:\n enginePathRead = literal_eval(sourceFile.read().split('=')[-1])\n \n if enginePathRead:\n config[\"EnginePath\"] = enginePathRead\n print(\"> Read engine path from\", enginePath.as_posix())\n \n return config\n\ndef getGameDir(config):\n \n def getGameDirName(name):\n result = \"\"\n allowedChars = string.ascii_lowercase + string.ascii_uppercase + \" -\"\n for c in name:\n if c in allowedChars:\n result += c\n return result\n \n gameDir = Path.home()\n gameName = getGameDirName(config[\"GameName\"])\n \n # Get game directory in a cross platform way\n if sys.platform == \"win32\":\n gameDir = Path(os.environ.get(\"APPDATA\")) / gameName\n elif sys.platform == \"linux\":\n gameDir = gameDir / (\".local/share/\" + gameName)\n \n # Create game directory\n gameDir.mkdir(parents=True, exist_ok=True)\n \n return gameDir\n \ndef getTempDir(config):\n tempDir = Path.home()\n tempDirName = md5((\"BGArmor\" + str(time())).encode()).hexdigest().upper()\n \n if sys.platform == \"win32\":\n tempDir = Path(os.environ.get(\"TEMP\")) / tempDirName\n elif sys.platform == \"linux\":\n tempDir = Path(\"/tmp/.\" + tempDirName)\n \n tempDir.mkdir(parents=False, exist_ok=True)\n \n if sys.platform == \"win32\":\n import ctypes\n ctypes.windll.kernel32.SetFileAttributesW(tempDir.as_posix(), 2)\n \n return tempDir\n \ndef getFilesLists(path):\n persistentFiles = []\n generalFiles = []\n \n # Populate persistent files list\n for pattern in config[\"Persistent\"]:\n persistentFiles += [Path(p).resolve() for p in glob.glob(\n path.as_posix() + \"/**/\" + pattern, recursive=True)]\n \n # Populate general files list\n generalFiles += [Path(p).resolve() for p in glob.glob(\n path.as_posix() + \"/**/*\", recursive=True)\n if Path(p).is_file()]\n \n # Remove persistent files from general files list\n for pers in persistentFiles:\n for gen in generalFiles:\n if pers.samefile(gen):\n generalFiles.remove(gen)\n \n return [persistentFiles, generalFiles]\n\ndef ensurePath(path):\n path.parent.mkdir(parents=True, exist_ok=True)\n return path\n\ndef decompressDataFile(dataFile, targetPath):\n startTime = time()\n \n if dataFile.exists():\n curLineType = \"Path\"\n filePath = None\n numChunks = 1\n curChunk = 0\n print(\"\\n> Decompressing data file from\", dataFile.as_posix())\n \n for line in open(dataFile.as_posix(), \"rb\"):\n if curLineType == \"Path\":\n lineItems = base64.b64decode(line)\n lineItems = lineItems.decode()\n lineItems = lineItems.split(ITEM_SEPARATOR)\n filePath = (targetPath / lineItems[0])\n numChunks = literal_eval(lineItems[1])\n curLineType = \"Data\"\n \n else:\n if not filePath.parent.exists():\n try:\n os.makedirs(filePath.parent.as_posix())\n except:\n pass\n \n if curChunk < numChunks:\n with open(filePath.as_posix(), \"ab\") as targetFileObj:\n targetFileObj.write(zlib.decompress(base64.b64decode(line)))\n curChunk += 1\n \n if curChunk >= numChunks:\n curChunk = 0\n numChunks = 1\n curLineType = \"Path\"\n \n print(\"> Done! 
Time taken:\", round(time() - startTime, 3), \"seconds\\n\")\n\ndef copyPersistentFiles(pathFrom, pathTo, filesList):\n for fileFrom in filesList:\n fileRelative = Path(fileFrom.as_posix().replace(pathFrom.as_posix(), \"\")[1:])\n fileTo = (pathTo / fileRelative)\n shutil.copy(fileFrom.as_posix(), ensurePath(fileTo).as_posix())\n\ndef removeEmptyDirs(path):\n for root, dirs, files in os.walk(path.as_posix(), topdown=False):\n root = Path(root).resolve()\n for _dir in dirs:\n _dir = root / _dir\n try:\n _dir.rmdir()\n except:\n pass\n\nconfig = loadConfig(curPath)\n\nif config is not None:\n dataFile = curPath.parent / config[\"DataFile\"]\n \n if dataFile.exists():\n dataFile = dataFile.resolve()\n gameDir = getGameDir(config)\n tempDir = getTempDir(config)\n \n debugMsg(\"> Extract game data into temp directory...\")\n decompressDataFile(dataFile, tempDir)\n \n filesLists = getFilesLists(gameDir)\n persistentFiles = filesLists[0]\n \n debugMsg(\"> Copy persistent files from game to temp directory...\")\n copyPersistentFiles(gameDir, tempDir, persistentFiles)\n \n enginePath = curPath.parent / config[\"EnginePath\"]\n \n if platform.system() != \"Windows\":\n os.system(\"chmod +x \" + PLAT_QUOTE + enginePath.as_posix() + PLAT_QUOTE)\n \n extraArgs = \" \" + \" \".join(sys.argv[1:]) if len(sys.argv) > 1 else \"\"\n command = PLAT_QUOTE + enginePath.as_posix() + PLAT_QUOTE + extraArgs + \" \" + PLAT_QUOTE + config[\"MainFile\"] + PLAT_QUOTE\n os.chdir(tempDir.as_posix())\n debugMsg(\"> Launch game in blenderplayer\")\n subprocess.call(command, shell=True)\n sleep(0.2)\n \n filesLists = getFilesLists(tempDir)\n persistentFiles = filesLists[0]\n \n debugMsg(\"> Copy persistent files from temp to game directory...\")\n copyPersistentFiles(tempDir, gameDir, persistentFiles)\n \n debugMsg(\"> Remove all files before finish...\")\n for _file in filesLists[0] + filesLists[1]:\n _file.unlink()\n \n removeEmptyDirs(tempDir)\n os.chdir(tempDir.parent.as_posix())\n shutil.rmtree(tempDir.as_posix())\n \n else:\n print(\"X Could not find game data at\", dataFile.as_posix())\n", "sub_path": "source/launcher.py", "file_name": "launcher.py", "file_ext": "py", "file_size_in_byte": 7662, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "platform.system", "line_number": 17, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 20, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 21, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 25, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 48, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 53, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 65, "usage_type": "attribute"}, {"api_name": "string.ascii_uppercase", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pathlib.Path.home", "line_number": 71, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 71, "usage_type": "name"}, {"api_name": "sys.platform", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 76, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 76, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 76, "usage_type": "attribute"}, {"api_name": "sys.platform", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pathlib.Path.home", "line_number": 86, "usage_type": "call"}, {"api_name": "pathlib.Path", 
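For context on decompressDataFile above: each data line is expected to be the base64 text of a zlib-compressed chunk. A minimal round trip of that encoding (the packer side is assumed, not shown in this file):

import base64, zlib

payload = b"some game asset bytes" * 100
line = base64.b64encode(zlib.compress(payload))   # one encoded data line
assert zlib.decompress(base64.b64decode(line)) == payload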
"line_number": 86, "usage_type": "name"}, {"api_name": "hashlib.md5", "line_number": 87, "usage_type": "call"}, {"api_name": "time.time", "line_number": 87, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 90, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 90, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 90, "usage_type": "attribute"}, {"api_name": "sys.platform", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 92, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 96, "usage_type": "attribute"}, {"api_name": "ctypes.windll.kernel32.SetFileAttributesW", "line_number": 98, "usage_type": "call"}, {"api_name": "ctypes.windll", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 108, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 108, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 112, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 112, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 114, "usage_type": "call"}, {"api_name": "time.time", "line_number": 129, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 140, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 144, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 150, "usage_type": "call"}, {"api_name": "zlib.decompress", "line_number": 156, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 156, "usage_type": "call"}, {"api_name": "time.time", "line_number": 164, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 168, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 170, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 173, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 174, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 203, "usage_type": "call"}, {"api_name": "os.system", "line_number": 204, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 206, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 208, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 210, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 211, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 224, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 225, "usage_type": "call"}]} {"seq_id": "118529744", "text": "import cv2 as cv\nimport numpy as np\n\ndef color_space(src):\n gray = cv.cvtColor(src,cv.COLOR_BGR2GRAY)\n cv.imshow(\"gray\",gray)\n gray = cv.cvtColor(src, cv.COLOR_BGR2HSV)\n cv.imshow(\"hsv\", gray)\n gray = cv.cvtColor(src, cv.COLOR_BGR2YUV)\n cv.imshow(\"yuv\", gray)\n gray = cv.cvtColor(src, cv.COLOR_BGR2YCrCb)\n cv.imshow(\"ycrcb\", gray)\n\n\ndef extrace_object():\n capture = cv.VideoCapture(\"D:/xxx.mp4\")\n while True:\n ret,frame = capture.read()\n if ret == False:\n break\n hsv = cv.cvtColor(frame,cv.COLOR_BGR2HSV)\n lower_hsv = np.array([35,43,46])\n upper_hsv = np.array([77,255,255])\n mask = cv.inRange(hsv,lowerb=lower_hsv,upperb=upper_hsv)\n cv.imshow('video',frame)\n cv.imshow(\"MASK\",mask)\n c = cv.waitKey(50)\n if c == 27:\n break\n\nprint(\"-------Hello Python-------\")\nsrc = 
cv.imread(\"C:/Users/Administrator/Desktop/caps.bmp\")\ncv.namedWindow(\"input_image\",cv.WINDOW_NORMAL)\ncv.imshow(\"input_image\",src)\ncolor_space(src)\n# extrace_object()\n\nb,g,r = cv.split(src)\ncv.imshow(\"b\",b)\ncv.imshow(\"g\",g)\ncv.imshow(\"r\",r)\n\nsrc[:,:,2] = 0\n# src = cv.merge([b,g,r])\ncv.imshow(\"change\",src)\ncv.waitKey(0)\n\ncv.destroyAllWindows()", "sub_path": "tutorial_3.py", "file_name": "tutorial_3.py", "file_ext": "py", "file_size_in_byte": 1230, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "cv2.cvtColor", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 5, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2YUV", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2YCrCb", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.WINDOW_NORMAL", "line_number": 33, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.split", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 48, "usage_type": "call"}]} {"seq_id": "637856867", "text": "#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Load the config file.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom os.path import join\nimport yaml\nimport shutil\n\n\ndef load_config(config_path, is_eval=False):\n \"\"\"Load a configuration file.\n Args:\n config_path (string):\n is_eval (bool, optional):\n Returns:\n params (dict):\n \"\"\"\n with open(config_path, \"r\") as f:\n config = yaml.safe_load(f)\n\n # Load the parent config file\n if 'parent' in config.keys():\n if not is_eval:\n with open(config['parent'], \"r\") as fp:\n config_parent = yaml.safe_load(fp)\n\n else:\n with open(config_path.replace('config.yml', 'config_parent.yml'), \"r\") as fp:\n config_parent = yaml.safe_load(fp)\n\n params = config_parent['param']\n\n # Override the parent parameters with the child ones\n for key in config['param'].keys():\n params[key] = config['param'][key]\n\n else:\n params = config['param']\n\n return params\n\n\ndef save_config(config_path, save_path):\n \"\"\"Save a configuration file.\n Args:\n config_path (string):\n save_path (string):\n \"\"\"\n\n shutil.copyfile(config_path, join(save_path, 'config.yml'))\n\n # Save the parent config file, if any\n with open(config_path, \"r\") as f:\n config = yaml.safe_load(f)\n\n # Load the parent config file\n if 'parent' in config.keys():\n config_parent_path = config['parent']\n shutil.copyfile(config_parent_path, join(\n save_path, 'config_parent.yml'))\n", "sub_path": "utils/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 1667, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "yaml.safe_load", "line_number": 24, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 30, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 34, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 60, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}]} {"seq_id": "603266196", "text": "## All the necessary middleware are configured here\nfrom fastapi.middleware.cors import CORSMiddleware\n\ndef setup_CORS(app):\n '''\n Helps to set up the CORS middleware\n\n Args:\n app -- FastAPI object to add the middleware for the CORS setup\n '''\n origins = [\n \"http://localhost\",\n \"http://localhost:3000\",\n ]\n app.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n", "sub_path": "middleware/middleware.py", "file_name": "middleware.py", "file_ext": "py", "file_size_in_byte": 520, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "fastapi.middleware.cors.CORSMiddleware", "line_number": 16, "usage_type": "argument"}]} {"seq_id": "136578542", "text": "\nfrom chatterbot import ChatBot\nfrom chatterbot.trainers import ListTrainer\nfrom gtts import gTTS\nfrom pygame import mixer\nimport speech_recognition as sr\nimport os\n\nr = sr.Recognizer()\nmixer.init()\n\nbot = ChatBot('Bot')\nbot.set_trainer(ListTrainer)\n\n\n\nwhile True:\n message = input('You: ')\n if message.strip() != 'Bye':\n reply = bot.get_response(message)\n\n tts = gTTS(str(reply), lang='en')\n tts.save('kasule.mp3')\n 
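The parent/child merge in load_config above boils down to a plain dict override, child keys winning. A tiny illustration with made-up parameter names:

parent_params = {"lr": 0.1, "epochs": 10}
child_params = {"lr": 0.01}
params = dict(parent_params)
params.update(child_params)   # same effect as the key-by-key loop in load_config
assert params == {"lr": 0.01, "epochs": 10}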
mixer.music.load('kasule.mp3')\n mixer.music.play()\n\n print('ChatBot :', reply)\n if message.strip() == 'Bye':\n print('ChatBot : Bye')\n break\n", "sub_path": "chatbot.py", "file_name": "chatbot.py", "file_ext": "py", "file_size_in_byte": 628, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "speech_recognition.Recognizer", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.mixer.init", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 10, "usage_type": "name"}, {"api_name": "chatterbot.ChatBot", "line_number": 12, "usage_type": "call"}, {"api_name": "chatterbot.trainers.ListTrainer", "line_number": 13, "usage_type": "argument"}, {"api_name": "gtts.gTTS", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.mixer.music.load", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 24, "usage_type": "name"}, {"api_name": "pygame.mixer.music.play", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 25, "usage_type": "name"}]} {"seq_id": "263037416", "text": "import logging\nimport os\nimport sys\nimport unittest\nimport pandas\nimport numpy\nimport random\nfrom sklearn.preprocessing import MinMaxScaler\nfrom pdf2image import convert_from_path, convert_from_bytes\nimport scipy.spatial as spatial\n\nfrom core import config\n\n\nsys.path.append(\".\")\n\nfrom latex.LayoutReader.trueformatpdf2htmlEX import PDF_AnnotatorTool\nfrom core.pathant.Converter import converter\n\nFEATURES_FROM_PDF2HTMLEX = \"features_from_pdf2htmlex\"\n\n@converter(\"htm\", \"feature\")\nclass LabeledFeatureMaker(PDF_AnnotatorTool):\n def __init__(self, debug=True, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.debug = debug\n\n def __call__(self, labeled_paths, *args, **kwargs):\n for doc_id, (labeled_html_path, meta) in enumerate(labeled_paths):\n if \"training\" in self.flags and not any([tex in labeled_html_path for tex in [\"tex1\", 'tex2', 'tex3']]):\n continue\n\n if FEATURES_FROM_PDF2HTMLEX in self.flags:\n try:\n feature_df = self.use_pdf2htmlEX_features(meta['html_path'] + \".feat\")\n except FileNotFoundError as e:\n self.logger.error(\"output of pdf2htmlEX was not found\")\n raise e\n except Exception as e:\n self.logger.error(f\"could not compute features, proceeding ... 
error was {e}\")\n raise e\n else:\n try:\n feature_df, soup = self.compute_html_soup_features(labeled_html_path)\n except Exception as e:\n self.logger.error(\"Could not compute features for this document, empty document?\")\n continue\n\n for random_i, final_feature_df in enumerate(self.feature_fuzz(feature_df)):\n final_feature_df = self.compute_complex_coordination_data(final_feature_df)\n\n final_feature_df[\"doc_id\"] = str(doc_id) + \".random\" + str(random_i)\n meta[\"doc_id\"] = str(doc_id) + \".random\" + str(random_i)\n meta['html_path'] = labeled_html_path\n min_max_scaler = MinMaxScaler()\n x = final_feature_df[config.cols_to_use].values\n x_scaled = min_max_scaler.fit_transform(x)\n df_temp = pandas.DataFrame(x_scaled, columns=config.cols_to_use, index=final_feature_df.index)\n final_feature_df[config.cols_to_use] = df_temp\n\n images = convert_from_path(meta[\"pdf_path\"])\n for page_number, pil in enumerate(images):\n image_path = f'{meta[\"path\"]}.{page_number}.jpg'\n # map every text box on this page to the rendered page image\n page_rows = final_feature_df.page_number == page_number\n final_feature_df.loc[page_rows, 'image_path'] = image_path\n pil.save(image_path)\n\n yield final_feature_df, meta\n\n def compute_complex_coordination_data(self, feature_df):\n feature_df = feature_df.groupby(['page_number']).apply(self.page_web)\n return feature_df\n\n def distances(self, sub_df, ax, ay, bx, by):\n return [min([((xa - xb) ** 2 + (ya - yb) ** 2)\n for xa, ya in list(zip(sub_df[ax], sub_df[ay]))\n if ya != yb and xa != xb])\n for xb, yb in list(zip(sub_df[bx], sub_df[by]))]\n\n def sinuses(self, sub_df, ax, ay, bx, by):\n return [min([round((xa - xb) / (ya - yb), 2)\n for xa, ya in list(zip(sub_df[ax], sub_df[ay]))\n if ya != yb and xa != xb])\n for xb, yb in list(zip(sub_df[bx], sub_df[by]))]\n\n def page_web(self, page_df):\n\n self.point_density_frequence_per_page(page_df)\n\n if FEATURES_FROM_PDF2HTMLEX in self.flags:\n page_df['x1'] = page_df.x\n page_df['y1'] = page_df.y\n page_df['x2'] = page_df.x + page_df.ascent\n page_df['y2'] = page_df.y + page_df.descent\n page_df['center_x'] = [(x1 + x2) / 2 for x1, x2 in zip(page_df.x1, page_df.x2)]\n page_df['center_y'] = [(y1 + y2) / 2 for y1, y2 in zip(page_df.y1, page_df.y2)]\n\n points = list(zip(page_df.center_x, page_df.center_y))\n kd_tree = spatial.KDTree(points)\n\n try:\n all_nearest_points = \\\n [(p, [points[k] for k in kd_tree.query(p, k=config.layout_model_next_text_boxes)[1]])\n for p in zip(page_df.center_x, page_df.center_y)]\n\n for k in range(config.layout_model_next_text_boxes):\n page_df[f'nearest_{k}_center_x'], page_df[f'nearest_{k}_center_y'] = list(zip(*\n [nearest_points[k] for p1, nearest_points in all_nearest_points]\n ))\n\n except Exception as e:\n print(points)\n self.logger.warning(f\"not enough points in page to find {config.layout_model_next_text_boxes} nearest points, faking with 0.5\")\n for k in range(config.layout_model_next_text_boxes):\n page_df[f'nearest_{k}_center_x'] = [0.5] * len(page_df)\n page_df[f'nearest_{k}_center_y'] = [0.5] * len(page_df)\n\n page_df['dxy1'] = self.distances(page_df, 'x1', 'y1', 'x2', 'y2')\n page_df['dxy2'] = self.distances(page_df, 'x2', 'y2', 'x1', 'y1')\n page_df['dxy3'] = self.distances(page_df, 'x1', 'y2', 'x2', 'y1')\n page_df['dxy4'] = self.distances(page_df, 'x2', 'y1', 'x1', 'y2')\n\n page_df['probascent'] = page_df.ascent.map(page_df.ascent.value_counts(normalize=True))\n page_df['probdescent'] = 
page_df.descent.map(page_df.descent.value_counts(normalize=True))\n\n return page_df\n\n def feature_fuzz(self, feature_df):\n yield feature_df\n\n if False and \"training\" in self.flags:\n def compute_fuzz(series, value):\n return value + round(random.uniform(-fuzz_percent, +fuzz_percent), 2) * max(series)\n\n def iter_col(series):\n if isinstance(series[0], float):\n return series.apply(lambda x: compute_fuzz(series, x))\n else:\n return series\n\n for feature_fuzz_range in config.feature_fuzz_ranges:\n for fuzz_percent in numpy.arange(*feature_fuzz_range):\n yield feature_df.copy().apply(iter_col)\n\n\nclass TestComputedFeatureTable(unittest.TestCase):\n def init(self):\n from latex.LayoutReader.labeled_feature_maker import LabeledFeatureMaker\n\n latex_maker = LabeledFeatureMaker\n res = list(latex_maker.__call__(\n [(\n \"/home/finn/PycharmProjects/LayoutEagle/python/.core/tex_data/8d885eb85effba6b693ab5c3a82715ee/main.tex1.labeled.pdf\",\n {\n \"filename\": \".core/tex_data/8d885eb85effba6b693ab5c3a82715ee/main.tex1.labeled.pdf\"\n })]))\n self.df = res[0][0]\n\n def test_spider_web_lines(self):\n self.init()\n\n cols = self.df.columns\n assert (\"sin1\" in cols)\n assert (\"probsin1\" in cols)\n assert (\"probascent\" in cols)\n assert (\"dxy1\" in cols)\n assert (\"qwertz\" not in cols)\n print(cols)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "python/layout/latex/LayoutReader/labeled_feature_maker.py", "file_name": "labeled_feature_maker.py", "file_ext": "py", "file_size_in_byte": 7521, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "sys.path.append", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "latex.LayoutReader.trueformatpdf2htmlEX.PDF_AnnotatorTool", "line_number": 23, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 55, "usage_type": "call"}, {"api_name": "core.config.cols_to_use", "line_number": 56, "usage_type": "attribute"}, {"api_name": "core.config", "line_number": 56, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 58, "usage_type": "call"}, {"api_name": "core.config.cols_to_use", "line_number": 58, "usage_type": "attribute"}, {"api_name": "core.config", "line_number": 58, "usage_type": "name"}, {"api_name": "core.config.cols_to_use", "line_number": 59, "usage_type": "attribute"}, {"api_name": "core.config", "line_number": 59, "usage_type": "name"}, {"api_name": "pdf2image.convert_from_path", "line_number": 61, "usage_type": "call"}, {"api_name": "scipy.spatial.KDTree", "line_number": 101, "usage_type": "call"}, {"api_name": "scipy.spatial", "line_number": 101, "usage_type": "name"}, {"api_name": "core.config.layout_model_next_text_boxes", "line_number": 105, "usage_type": "attribute"}, {"api_name": "core.config", "line_number": 105, "usage_type": "name"}, {"api_name": "core.config.layout_model_next_text_boxes", "line_number": 108, "usage_type": "attribute"}, {"api_name": "core.config", "line_number": 108, "usage_type": "name"}, {"api_name": "core.config.layout_model_next_text_boxes", "line_number": 115, "usage_type": "attribute"}, {"api_name": "core.config", "line_number": 115, "usage_type": "name"}, {"api_name": "core.config.layout_model_next_text_boxes", "line_number": 116, "usage_type": "attribute"}, {"api_name": "core.config", "line_number": 116, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 135, 
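The nearest-text-box lookup in page_web relies on scipy's KDTree; here is a small standalone query showing the (distances, indices) return shape, with made-up points:

import scipy.spatial as spatial

points = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (2.0, 2.0)]
tree = spatial.KDTree(points)
distances, indices = tree.query((0.2, 0.1), k=2)   # two nearest neighbours
print([points[i] for i in indices])                # [(0.0, 0.0), (1.0, 0.0)]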
"usage_type": "call"}, {"api_name": "core.config.feature_fuzz_ranges", "line_number": 143, "usage_type": "attribute"}, {"api_name": "core.config", "line_number": 143, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 144, "usage_type": "call"}, {"api_name": "core.pathant.Converter.converter", "line_number": 22, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 148, "usage_type": "attribute"}, {"api_name": "latex.LayoutReader.labeled_feature_maker.LabeledFeatureMaker", "line_number": 152, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 174, "usage_type": "call"}]} {"seq_id": "286007078", "text": "# main webpage\n\n# include package needed to import other packages from own website\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^projects/', include('produced_albums_projects.urls')),\n url(r'^videos/', include('videos.urls')),\n]\n", "sub_path": "pedro_website/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 324, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 10, "usage_type": "call"}]} {"seq_id": "319217856", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom distutils.version import LooseVersion\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django_fsm import transition, RETURN_VALUE\nfrom rest_framework.exceptions import ValidationError\nimport stripe\n\nfrom shop import __version__ as SHOP_VERSION\nfrom shop.models.order import BaseOrder, OrderModel, OrderPayment\nfrom shop.money import MoneyMaker\nfrom shop.payment.base import PaymentProvider\n\nstripe.api_key = settings.SHOP_STRIPE['APIKEY']\n\n\nclass StripePayment(PaymentProvider):\n \"\"\"\n Provides a payment service for Stripe.\n \"\"\"\n namespace = 'stripe-payment'\n\n def get_payment_request(self, cart, request):\n \"\"\"\n From the given request, add a snippet to the page.\n \"\"\"\n try:\n self.charge(cart, request)\n thank_you_url = OrderModel.objects.get_latest_url()\n js_expression = 'window.location.href=\"{}\";'.format(thank_you_url)\n return js_expression\n except (KeyError, stripe.error.StripeError) as err:\n raise ValidationError(err)\n\n def charge(self, cart, request):\n \"\"\"\n Use the Stripe token from the request and charge immediately.\n This view is invoked by the Javascript function `scope.charge()` delivered\n by `get_payment_request`.\n \"\"\"\n token_id = cart.extra['payment_extra_data']['token_id']\n if LooseVersion(SHOP_VERSION) < LooseVersion('0.11'):\n charge = stripe.Charge.create(\n amount=cart.total.as_integer(),\n currency=cart.total.currency,\n source=token_id,\n description=settings.SHOP_STRIPE['PURCHASE_DESCRIPTION']\n )\n if charge['status'] == 'succeeded':\n order = 
OrderModel.objects.create_from_cart(cart, request)\n order.add_stripe_payment(charge)\n order.save()\n else:\n order = OrderModel.objects.create_from_cart(cart, request)\n charge = stripe.Charge.create(\n amount=cart.total.as_integer(),\n currency=cart.total.currency,\n source=token_id,\n transfer_group=order.get_number(),\n description=settings.SHOP_STRIPE['PURCHASE_DESCRIPTION'],\n )\n if charge['status'] == 'succeeded':\n order.populate_from_cart(cart, request)\n order.add_stripe_payment(charge)\n order.save()\n\n if charge['status'] != 'succeeded':\n msg = \"Stripe returned status '{status}' for id: {id}\"\n raise stripe.error.InvalidRequestError(msg.format(**charge))\n\n\nclass OrderWorkflowMixin(object):\n TRANSITION_TARGETS = {\n 'paid_with_stripe': _(\"Paid using Stripe\"),\n }\n\n def __init__(self, *args, **kwargs):\n if not isinstance(self, BaseOrder):\n raise ImproperlyConfigured(\"class 'OrderWorkflowMixin' is not of type 'BaseOrder'\")\n\n super(OrderWorkflowMixin, self).__init__(*args, **kwargs)\n\n @transition(field='status', source=['created'], target='paid_with_stripe')\n def add_stripe_payment(self, charge):\n assert self.currency == charge['currency'].upper(), \"Currency mismatch\"\n Money = MoneyMaker(self.currency)\n amount = Money(charge['amount']) / Money.subunits\n OrderPayment.objects.create(order=self, amount=amount, transaction_id=charge['id'],\n payment_method=StripePayment.namespace)\n\n def is_fully_paid(self):\n return super(OrderWorkflowMixin, self).is_fully_paid()\n\n @transition(field='status', source='paid_with_stripe', conditions=[is_fully_paid],\n custom=dict(admin=True, button_name=_(\"Acknowledge Payment\")))\n def acknowledge_stripe_payment(self):\n self.acknowledge_payment()\n\n def refund_payment(self):\n \"\"\"\n Refund the payment using Stripe's refunding API.\n \"\"\"\n Money = MoneyMaker(self.currency)\n filter_kwargs = {\n 'transaction_id__startswith': 'ch_',\n 'payment_method': StripePayment.namespace,\n }\n for payment in self.orderpayment_set.filter(**filter_kwargs):\n refund = stripe.Refund.create(charge=payment.transaction_id)\n if refund['status'] == 'succeeded':\n amount = Money(refund['amount']) / Money.subunits\n OrderPayment.objects.create(order=self, amount=-amount, transaction_id=refund['id'],\n payment_method=StripePayment.namespace)\n\n del self.amount_paid # to invalidate the cache\n if self.amount_paid:\n # proceed with other payment service providers\n super(OrderWorkflowMixin, self).refund_payment()\n", "sub_path": "shop_stripe/payment.py", "file_name": "payment.py", "file_ext": "py", "file_size_in_byte": 4918, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "stripe.api_key", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.conf.settings.SHOP_STRIPE", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 19, "usage_type": "name"}, {"api_name": "shop.payment.base.PaymentProvider", "line_number": 22, "usage_type": "name"}, {"api_name": "shop.models.order.OrderModel.objects.get_latest_url", "line_number": 34, "usage_type": "call"}, {"api_name": "shop.models.order.OrderModel.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "shop.models.order.OrderModel", "line_number": 34, "usage_type": "name"}, {"api_name": "stripe.error", "line_number": 37, "usage_type": "attribute"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 38, "usage_type": "call"}, 
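Stripe reports amounts in the currency's smallest unit, which is why add_stripe_payment above divides by Money.subunits. A library-free sketch of the same conversion; `from_subunits` and the use of Decimal are illustrative, not part of this module:

from decimal import Decimal

def from_subunits(amount, subunits=100):
    # Stripe charge amounts are integers in minor units (e.g. cents)
    return Decimal(amount) / subunits

assert from_subunits(1999) == Decimal("19.99")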
{"api_name": "distutils.version.LooseVersion", "line_number": 47, "usage_type": "call"}, {"api_name": "shop.__version__", "line_number": 47, "usage_type": "argument"}, {"api_name": "stripe.Charge.create", "line_number": 48, "usage_type": "call"}, {"api_name": "stripe.Charge", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.conf.settings.SHOP_STRIPE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 52, "usage_type": "name"}, {"api_name": "shop.models.order.OrderModel.objects.create_from_cart", "line_number": 55, "usage_type": "call"}, {"api_name": "shop.models.order.OrderModel.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "shop.models.order.OrderModel", "line_number": 55, "usage_type": "name"}, {"api_name": "shop.models.order.OrderModel.objects.create_from_cart", "line_number": 59, "usage_type": "call"}, {"api_name": "shop.models.order.OrderModel.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "shop.models.order.OrderModel", "line_number": 59, "usage_type": "name"}, {"api_name": "stripe.Charge.create", "line_number": 60, "usage_type": "call"}, {"api_name": "stripe.Charge", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.conf.settings.SHOP_STRIPE", "line_number": 65, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 65, "usage_type": "name"}, {"api_name": "stripe.error.InvalidRequestError", "line_number": 74, "usage_type": "call"}, {"api_name": "stripe.error", "line_number": 74, "usage_type": "attribute"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 79, "usage_type": "call"}, {"api_name": "shop.models.order.BaseOrder", "line_number": 83, "usage_type": "argument"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 84, "usage_type": "call"}, {"api_name": "shop.money.MoneyMaker", "line_number": 91, "usage_type": "call"}, {"api_name": "shop.models.order.OrderPayment.objects.create", "line_number": 93, "usage_type": "call"}, {"api_name": "shop.models.order.OrderPayment.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "shop.models.order.OrderPayment", "line_number": 93, "usage_type": "name"}, {"api_name": "django_fsm.transition", "line_number": 88, "usage_type": "call"}, {"api_name": "django_fsm.transition", "line_number": 99, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 100, "usage_type": "call"}, {"api_name": "shop.money.MoneyMaker", "line_number": 108, "usage_type": "call"}, {"api_name": "stripe.Refund.create", "line_number": 114, "usage_type": "call"}, {"api_name": "stripe.Refund", "line_number": 114, "usage_type": "attribute"}, {"api_name": "shop.models.order.OrderPayment.objects.create", "line_number": 117, "usage_type": "call"}, {"api_name": "shop.models.order.OrderPayment.objects", "line_number": 117, "usage_type": "attribute"}, {"api_name": "shop.models.order.OrderPayment", "line_number": 117, "usage_type": "name"}]} {"seq_id": "527164548", "text": "from glob import glob\nfrom config import EXAMPLE_PATH, MEDIA_FILE_HOST, CONTEXT_RANGE\nimport json\nimport string\nfrom pathlib import Path\n\nclass Decks:\n def __init__(self, category=\"anime\", path=EXAMPLE_PATH, has_image=True, has_sound=True):\n self.sentences = {}\n self.sentence_map = {}\n self.sentence_translation_map = {}\n self.category = category\n self.path = path\n self.has_image = has_image\n self.has_sound = has_sound\n\n def get_sentence(self, 
sentence_id):\n if sentence_id in self.sentences:\n return self.sentences[sentence_id]\n else:\n return None\n\n def get_sentence_map(self):\n return self.sentence_map\n\n def get_sentence_translation_map(self):\n return self.sentence_translation_map\n\n def load_decks(self):\n deck_folders = glob(str(self.path) + '/*/')\n for deck_folder in deck_folders:\n self.load_deck_by_path(deck_folder)\n \n def load_deck_by_path(self, path):\n sentences = []\n file = Path(path, 'data.json')\n with open(file, encoding='utf-8') as f:\n sentences = json.load(f)\n \n for index, sentence in enumerate(sentences):\n sentence = self.parse_sentence(sentence)\n pretext_sentences = sentences[0:index] if index < CONTEXT_RANGE else sentences[index-CONTEXT_RANGE:index] \n posttext_sentences = []\n if index < len(sentences):\n posttext_sentences = sentences[index+1:len(sentences)] if index+CONTEXT_RANGE > len(sentences) else sentences[index+1:index+CONTEXT_RANGE] \n sentence[\"pretext\"] = [sentence[\"id\"] for sentence in pretext_sentences]\n sentence[\"posttext\"] = [sentence[\"id\"] for sentence in posttext_sentences]\n if 'word_base_list' in sentence:\n self.sentence_map = self.map_sentence(sentence['word_base_list'], sentence['id'], self.sentence_map)\n if 'translation_word_base_list' in sentence:\n self.sentence_translation_map = self.map_sentence(sentence['translation_word_base_list'], sentence['id'], self.sentence_translation_map)\n self.sentences[sentence[\"id\"]] = sentence\n\n def parse_sentence(self, sentence):\n if (self.has_image):\n image_path = '{}/{}/{}/media/{}'.format(MEDIA_FILE_HOST, self.category, sentence['deck_name'], sentence['image'])\n sentence['image_url'] = image_path.replace(\" \", \"%20\")\n \n if (self.has_sound):\n sound_path = '{}/{}/{}/media/{}'.format(MEDIA_FILE_HOST, self.category, sentence['deck_name'], sentence['sound'])\n sentence['sound_url'] = sound_path.replace(\" \", \"%20\")\n return sentence\n \n def map_sentence(self, words, example_id, output_map):\n for (index, word) in enumerate(words):\n is_repeat = words.index(word) != index\n if is_repeat:\n continue\n if word in string.punctuation or word in '๏ผ๏ผŸใ€‚ใ€๏ผˆ๏ผ‰':\n continue\n if word not in output_map:\n output_map[word] = set()\n output_map[word].add(example_id)\n return output_map\n", "sub_path": "decks/decks.py", "file_name": "decks.py", "file_ext": "py", "file_size_in_byte": 3204, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "config.EXAMPLE_PATH", "line_number": 8, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 30, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 36, "usage_type": "call"}, {"api_name": "json.load", "line_number": 38, "usage_type": "call"}, {"api_name": "config.CONTEXT_RANGE", "line_number": 42, "usage_type": "name"}, {"api_name": "config.CONTEXT_RANGE", "line_number": 45, "usage_type": "name"}, {"api_name": "config.MEDIA_FILE_HOST", "line_number": 56, "usage_type": "argument"}, {"api_name": "config.MEDIA_FILE_HOST", "line_number": 60, "usage_type": "argument"}, {"api_name": "string.punctuation", "line_number": 69, "usage_type": "attribute"}]} {"seq_id": "581534143", "text": "# -*- coding: utf-8 -*- #\n# Copyright 2017 Google Inc. 
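map_sentence above builds a word -> sentence-id inverted index; collections.defaultdict(set) gives the same structure without the explicit membership check, as this toy version shows (the two sentences are invented):

from collections import defaultdict

index = defaultdict(set)
for sentence_id, words in {1: ["a", "b"], 2: ["b", "c"]}.items():
    for word in words:
        index[word].add(sentence_id)
print(index["b"])   # {1, 2}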
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test of the 'list' command.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.calliope import base as calliope_base\nfrom googlecloudsdk.core import grpc_util\nfrom googlecloudsdk.core import resources\nfrom tests.lib import cli_test_base\nfrom tests.lib import parameterized\nfrom tests.lib import sdk_test_base\nfrom tests.lib import test_case\n\nimport mock\nimport six\nfrom six.moves import range # pylint: disable=redefined-builtin\n\n\nif six.PY2:\n # TODO(b/78118402): gRPC support on Python 3.\n # This doesn't work on py3. We skip the import here just so tests can load\n # and be skipped without crashing.\n from google.bigtable.admin.v2 import bigtable_table_admin_pb2 # pylint: disable=g-import-not-at-top\n from google.bigtable.admin.v2 import bigtable_table_admin_pb2_grpc # pylint: disable=g-import-not-at-top\n from google.bigtable.admin.v2 import table_pb2 # pylint: disable=g-import-not-at-top\n\n # Can't create this class on py3 because it needs the grpc pb files.\n class BigtableTestStub(bigtable_table_admin_pb2_grpc.BigtableTableAdminStub):\n\n def __init__(self, channel):\n # pylint:disable=invalid-name\n self.ListTables = mock.MagicMock()\n\n\n@test_case.Filters.SkipOnPy3('Not yet py3 compatible', 'b/78118402')\n@parameterized.parameters(calliope_base.ReleaseTrack.ALPHA,\n calliope_base.ReleaseTrack.BETA)\nclass ListTests(sdk_test_base.WithFakeAuth,\n cli_test_base.CliTestBase,\n sdk_test_base.WithOutputCapture):\n\n def SetUp(self):\n self.StartObjectPatch(grpc_util, 'MakeSecureChannel')\n self.stub = BigtableTestStub(channel=None)\n self.StartObjectPatch(\n bigtable_table_admin_pb2_grpc, 'BigtableTableAdminStub',\n new=lambda _: self.stub)\n\n def _MakeInstanceRef(self, name):\n return resources.REGISTRY.Create(\n 'bigtableadmin.projects.instances',\n instancesId=name,\n projectsId=self.Project())\n\n def _MakeTableRef(self, name, instance_ref):\n return resources.REGISTRY.Create(\n 'bigtableadmin.projects.instances.tables',\n tablesId=name,\n **instance_ref.AsDict())\n\n def testEmpty_DefaultName(self, track):\n self.track = track\n instance_ref = self._MakeInstanceRef('-')\n request = bigtable_table_admin_pb2.ListTablesRequest(\n parent=instance_ref.RelativeName(),\n )\n self.stub.ListTables.return_value = (\n bigtable_table_admin_pb2.ListTablesResponse())\n\n self.Run('bigtable instances tables list')\n\n self.AssertOutputEquals('')\n self.AssertErrEquals('Listed 0 items.\\n')\n self.stub.ListTables.assert_called_once_with(request)\n\n def testSingle_Name(self, track):\n self.track = track\n instance_ref = self._MakeInstanceRef('ocean')\n request = bigtable_table_admin_pb2.ListTablesRequest(\n parent=instance_ref.RelativeName(),\n )\n self.stub.ListTables.return_value = (\n bigtable_table_admin_pb2.ListTablesResponse(\n tables=[\n table_pb2.Table(name=self._MakeTableRef(\n 'fish1', 
instance_ref).SelfLink())\n ],\n ))\n\n self.Run('bigtable instances tables list --instances {}'\n .format(instance_ref.Name()))\n\n self.AssertOutputEquals('NAME\\nfish1\\n')\n self.AssertErrEquals('')\n self.stub.ListTables.assert_called_once_with(request)\n\n def testMultiple_Uri(self, track):\n self.track = track\n instance_ref = self._MakeInstanceRef('ocean')\n request = bigtable_table_admin_pb2.ListTablesRequest(\n parent=instance_ref.RelativeName(),\n )\n fish = [self._MakeTableRef('fish' + str(i), instance_ref) for i in range(3)]\n self.stub.ListTables.return_value = (\n bigtable_table_admin_pb2.ListTablesResponse(\n tables=[table_pb2.Table(name=f.RelativeName()) for f in fish],\n ))\n\n self.Run('bigtable instances tables list --instances {} --uri'\n .format(instance_ref.Name()))\n\n self.AssertOutputEquals('\\n'.join(f.SelfLink() for f in fish) + '\\n')\n self.AssertErrEquals('')\n self.stub.ListTables.assert_called_once_with(request)\n\n\nif __name__ == '__main__':\n test_case.main()\n", "sub_path": "google-cloud-sdk/lib/tests/unit/surface/bigtable/instances/tables/list_test.py", "file_name": "list_test.py", "file_ext": "py", "file_size_in_byte": 4842, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "six.PY2", "line_number": 35, "usage_type": "attribute"}, {"api_name": "google.bigtable.admin.v2.bigtable_table_admin_pb2_grpc.BigtableTableAdminStub", "line_number": 44, "usage_type": "attribute"}, {"api_name": "google.bigtable.admin.v2.bigtable_table_admin_pb2_grpc", "line_number": 44, "usage_type": "name"}, {"api_name": "mock.MagicMock", "line_number": 48, "usage_type": "call"}, {"api_name": "tests.lib.sdk_test_base.WithFakeAuth", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tests.lib.sdk_test_base", "line_number": 54, "usage_type": "name"}, {"api_name": "tests.lib.cli_test_base.CliTestBase", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tests.lib.cli_test_base", "line_number": 55, "usage_type": "name"}, {"api_name": "tests.lib.sdk_test_base.WithOutputCapture", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tests.lib.sdk_test_base", "line_number": 56, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.grpc_util", "line_number": 59, "usage_type": "argument"}, {"api_name": "google.bigtable.admin.v2.bigtable_table_admin_pb2_grpc", "line_number": 62, "usage_type": "argument"}, {"api_name": "googlecloudsdk.core.resources.REGISTRY.Create", "line_number": 66, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.resources.REGISTRY", "line_number": 66, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.core.resources", "line_number": 66, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.resources.REGISTRY.Create", "line_number": 72, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.resources.REGISTRY", "line_number": 72, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.core.resources", "line_number": 72, "usage_type": "name"}, {"api_name": "google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesRequest", "line_number": 80, "usage_type": "call"}, {"api_name": "google.bigtable.admin.v2.bigtable_table_admin_pb2", "line_number": 80, "usage_type": "name"}, {"api_name": "google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesResponse", "line_number": 84, "usage_type": "call"}, {"api_name": "google.bigtable.admin.v2.bigtable_table_admin_pb2", "line_number": 84, "usage_type": "name"}, {"api_name": 
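The stub pattern in these tests (set return_value, exercise the code, then assert_called_once_with) works with any MagicMock; a minimal standalone version using the stdlib unittest.mock:

from unittest import mock

stub = mock.MagicMock()
stub.ListTables.return_value = "response"
assert stub.ListTables("request") == "response"
stub.ListTables.assert_called_once_with("request")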
"google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesRequest", "line_number": 95, "usage_type": "call"}, {"api_name": "google.bigtable.admin.v2.bigtable_table_admin_pb2", "line_number": 95, "usage_type": "name"}, {"api_name": "google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesResponse", "line_number": 99, "usage_type": "call"}, {"api_name": "google.bigtable.admin.v2.bigtable_table_admin_pb2", "line_number": 99, "usage_type": "name"}, {"api_name": "google.bigtable.admin.v2.table_pb2.Table", "line_number": 101, "usage_type": "call"}, {"api_name": "google.bigtable.admin.v2.table_pb2", "line_number": 101, "usage_type": "name"}, {"api_name": "google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesRequest", "line_number": 116, "usage_type": "call"}, {"api_name": "google.bigtable.admin.v2.bigtable_table_admin_pb2", "line_number": 116, "usage_type": "name"}, {"api_name": "six.moves.range", "line_number": 119, "usage_type": "call"}, {"api_name": "google.bigtable.admin.v2.bigtable_table_admin_pb2.ListTablesResponse", "line_number": 121, "usage_type": "call"}, {"api_name": "google.bigtable.admin.v2.bigtable_table_admin_pb2", "line_number": 121, "usage_type": "name"}, {"api_name": "google.bigtable.admin.v2.table_pb2.Table", "line_number": 122, "usage_type": "call"}, {"api_name": "google.bigtable.admin.v2.table_pb2", "line_number": 122, "usage_type": "name"}, {"api_name": "tests.lib.test_case.Filters.SkipOnPy3", "line_number": 51, "usage_type": "call"}, {"api_name": "tests.lib.test_case.Filters", "line_number": 51, "usage_type": "attribute"}, {"api_name": "tests.lib.test_case", "line_number": 51, "usage_type": "name"}, {"api_name": "tests.lib.parameterized.parameters", "line_number": 52, "usage_type": "call"}, {"api_name": "tests.lib.parameterized", "line_number": 52, "usage_type": "name"}, {"api_name": "googlecloudsdk.calliope.base.ReleaseTrack", "line_number": 52, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.calliope.base", "line_number": 52, "usage_type": "name"}, {"api_name": "googlecloudsdk.calliope.base.ReleaseTrack", "line_number": 53, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.calliope.base", "line_number": 53, "usage_type": "name"}, {"api_name": "tests.lib.test_case.main", "line_number": 134, "usage_type": "call"}, {"api_name": "tests.lib.test_case", "line_number": 134, "usage_type": "name"}]} {"seq_id": "574757108", "text": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\n\n\"\"\"\n KFServing\n\n Python SDK for KFServing # noqa: E501\n\n The version of the OpenAPI document: v0.1\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom kfserving.configuration import Configuration\n\n\nclass V1beta1ComponentStatusSpec(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is 
attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'address': 'KnativeAddressable',\n 'latest_created_revision': 'str',\n 'latest_ready_revision': 'str',\n 'previous_ready_revision': 'str',\n 'traffic_percent': 'int',\n 'url': 'KnativeURL'\n }\n\n attribute_map = {\n 'address': 'address',\n 'latest_created_revision': 'latestCreatedRevision',\n 'latest_ready_revision': 'latestReadyRevision',\n 'previous_ready_revision': 'previousReadyRevision',\n 'traffic_percent': 'trafficPercent',\n 'url': 'url'\n }\n\n def __init__(self, address=None, latest_created_revision=None, latest_ready_revision=None, previous_ready_revision=None, traffic_percent=None, url=None, local_vars_configuration=None): # noqa: E501\n \"\"\"V1beta1ComponentStatusSpec - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._address = None\n self._latest_created_revision = None\n self._latest_ready_revision = None\n self._previous_ready_revision = None\n self._traffic_percent = None\n self._url = None\n self.discriminator = None\n\n if address is not None:\n self.address = address\n if latest_created_revision is not None:\n self.latest_created_revision = latest_created_revision\n if latest_ready_revision is not None:\n self.latest_ready_revision = latest_ready_revision\n if previous_ready_revision is not None:\n self.previous_ready_revision = previous_ready_revision\n if traffic_percent is not None:\n self.traffic_percent = traffic_percent\n if url is not None:\n self.url = url\n\n @property\n def address(self):\n \"\"\"Gets the address of this V1beta1ComponentStatusSpec. # noqa: E501\n\n\n :return: The address of this V1beta1ComponentStatusSpec. # noqa: E501\n :rtype: KnativeAddressable\n \"\"\"\n return self._address\n\n @address.setter\n def address(self, address):\n \"\"\"Sets the address of this V1beta1ComponentStatusSpec.\n\n\n :param address: The address of this V1beta1ComponentStatusSpec. # noqa: E501\n :type: KnativeAddressable\n \"\"\"\n\n self._address = address\n\n @property\n def latest_created_revision(self):\n \"\"\"Gets the latest_created_revision of this V1beta1ComponentStatusSpec. # noqa: E501\n\n Latest revision name that is in created # noqa: E501\n\n :return: The latest_created_revision of this V1beta1ComponentStatusSpec. # noqa: E501\n :rtype: str\n \"\"\"\n return self._latest_created_revision\n\n @latest_created_revision.setter\n def latest_created_revision(self, latest_created_revision):\n \"\"\"Sets the latest_created_revision of this V1beta1ComponentStatusSpec.\n\n Latest revision name that is in created # noqa: E501\n\n :param latest_created_revision: The latest_created_revision of this V1beta1ComponentStatusSpec. # noqa: E501\n :type: str\n \"\"\"\n\n self._latest_created_revision = latest_created_revision\n\n @property\n def latest_ready_revision(self):\n \"\"\"Gets the latest_ready_revision of this V1beta1ComponentStatusSpec. # noqa: E501\n\n Latest revision name that is in ready state # noqa: E501\n\n :return: The latest_ready_revision of this V1beta1ComponentStatusSpec. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._latest_ready_revision\n\n @latest_ready_revision.setter\n def latest_ready_revision(self, latest_ready_revision):\n \"\"\"Sets the latest_ready_revision of this V1beta1ComponentStatusSpec.\n\n Latest revision name that is in ready state # noqa: E501\n\n :param latest_ready_revision: The latest_ready_revision of this V1beta1ComponentStatusSpec. # noqa: E501\n :type: str\n \"\"\"\n\n self._latest_ready_revision = latest_ready_revision\n\n @property\n def previous_ready_revision(self):\n \"\"\"Gets the previous_ready_revision of this V1beta1ComponentStatusSpec. # noqa: E501\n\n Previous revision name that is in ready state # noqa: E501\n\n :return: The previous_ready_revision of this V1beta1ComponentStatusSpec. # noqa: E501\n :rtype: str\n \"\"\"\n return self._previous_ready_revision\n\n @previous_ready_revision.setter\n def previous_ready_revision(self, previous_ready_revision):\n \"\"\"Sets the previous_ready_revision of this V1beta1ComponentStatusSpec.\n\n Previous revision name that is in ready state # noqa: E501\n\n :param previous_ready_revision: The previous_ready_revision of this V1beta1ComponentStatusSpec. # noqa: E501\n :type: str\n \"\"\"\n\n self._previous_ready_revision = previous_ready_revision\n\n @property\n def traffic_percent(self):\n \"\"\"Gets the traffic_percent of this V1beta1ComponentStatusSpec. # noqa: E501\n\n Traffic percent on the latest ready revision # noqa: E501\n\n :return: The traffic_percent of this V1beta1ComponentStatusSpec. # noqa: E501\n :rtype: int\n \"\"\"\n return self._traffic_percent\n\n @traffic_percent.setter\n def traffic_percent(self, traffic_percent):\n \"\"\"Sets the traffic_percent of this V1beta1ComponentStatusSpec.\n\n Traffic percent on the latest ready revision # noqa: E501\n\n :param traffic_percent: The traffic_percent of this V1beta1ComponentStatusSpec. # noqa: E501\n :type: int\n \"\"\"\n\n self._traffic_percent = traffic_percent\n\n @property\n def url(self):\n \"\"\"Gets the url of this V1beta1ComponentStatusSpec. # noqa: E501\n\n\n :return: The url of this V1beta1ComponentStatusSpec. # noqa: E501\n :rtype: KnativeURL\n \"\"\"\n return self._url\n\n @url.setter\n def url(self, url):\n \"\"\"Sets the url of this V1beta1ComponentStatusSpec.\n\n\n :param url: The url of this V1beta1ComponentStatusSpec. 
# noqa: E501\n :type: KnativeURL\n \"\"\"\n\n self._url = url\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, V1beta1ComponentStatusSpec):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, V1beta1ComponentStatusSpec):\n return True\n\n return self.to_dict() != other.to_dict()\n", "sub_path": "python/kfserving/kfserving/models/v1beta1_component_status_spec.py", "file_name": "v1beta1_component_status_spec.py", "file_ext": "py", "file_size_in_byte": 8968, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "kfserving.configuration.Configuration", "line_number": 70, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 232, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 254, "usage_type": "call"}]} {"seq_id": "506867276", "text": "import dash_core_components as dcc\nimport plotly.express as px\nimport pandas as pd\nimport dash_daq as daq\nimport dash\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nfrom dash.dependencies import Output, Input\n\nimport numpy as np\nimport base64\n\n\nfrom app import app\n# FA = \"https://use.fontawesome.com/releases/v5.15.1/css/all.css\"\n# app = dash.Dash(__name__,external_stylesheets=[dbc.themes.CYBORG, FA], suppress_callback_exceptions=True)#,prevent_initial_callbacks=True\ntoken = \"pk.eyJ1IjoiaHVtYW5pbmciLCJhIjoiY2tpcHJiN3BlMDBjaDJ1b2J6ODQ4dzNlcyJ9.55HzvciQ31i0_ODARa9rLQ\"\n\n#\ndf = pd.read_csv(\"data/tk2017_maps_so_far2.csv\", delimiter =';', decimal =',', encoding ='utf-8',na_values=['#DIV/0!'])\ndf_buurten = pd.read_csv(\"data/buurten_v1.csv\", delimiter =',', decimal =',', encoding ='utf-8', na_values=['#DIV/0!'])\n\n# df_flyer = pd.read_csv(\"fl2_test.csv\", delimiter =',', decimal ='.', encoding ='utf-8',na_values=['#DIV/0!'])\n\n# df = pd.read_csv(\"tk2017_maps_so_far2.csv\", delimiter =';', decimal =',', encoding ='utf-8',na_values=['#DIV/0!'])\n\ndf['kleur'] = 0\ndef fig_maps(df,token,size,color):\n if color == 'kleur':\n fig = px.scatter_mapbox(df, lat=\"Lat\", lon=\"Long\", hover_name=\"bureau_label\", hover_data=['Artikel 1', 'PvdD',\"GL\",\"DENK\",\"D66\"],\n size=size, zoom=6, height=650)\n else:\n fig = px.scatter_mapbox(df, lat=\"Lat\", lon=\"Long\", hover_name=\"bureau_label\", hover_data=['Artikel 1','PvdD', \"GL\",\"DENK\",\"D66\"],\n size=size, zoom=6, height=650,color=color)\n fig.update_layout(mapbox_accesstoken=token) #\n fig.update_layout(margin={\"r\": 0, \"t\": 0,\"l\": 0, \"b\": 0})\n fig.update_layout(uirevision=True)\n # 
fig.update_layout(hovermode=\"text\")\n return fig\n\ndef fig_campagne(df,token):\n fig = px.scatter_mapbox(df, lat=\"Lat\", lon=\"Long\", size=\"Flyers\",\n color_discrete_sequence=[\"fuchsia\"], size_max=50, zoom=12, height=650)\n fig.update_layout(mapbox_accesstoken=token)\n fig.update_layout(margin={\"r\": 0, \"t\": 0,\"l\": 0, \"b\": 0})\n fig.update_layout(uirevision=True)\n return fig\n\ndef get_polygon(lons, lats, color='blue'):\n if len(lons) != len(lats):\n raise ValueError('the length of the longitude list must coincide with that of the latitude list')\n geojd = {\"type\": \"FeatureCollection\"}\n geojd['features'] = []\n coords = []\n for lon, lat in zip(lons, lats):\n coords.append((lon, lat))\n coords.append((lons[0], lats[0])) #close the polygon\n geojd['features'].append({ \"type\": \"Feature\",\n \"geometry\": {\"type\": \"Polygon\",\n \"coordinates\": [coords] }})\n layer=dict(sourcetype = 'geojson',\n source =geojd,\n below='',\n type = 'fill',\n opacity = 0.3,\n color = color)\n return layer\n\ndef create_flyer_layers(df):\n mylayers =[]\n for buurt in list(df.columns):\n lats = []\n lons = []\n for i in range(0,len(df[buurt].dropna())):\n lats.append(float(df[buurt].dropna()[i].split(', ')[0]))\n lons.append(float(df[buurt].dropna()[i].split(', ')[1]))\n mylayers.append(get_polygon(lons=lons, lats=lats, color='#FFFF00'))\n return mylayers\n\ndef filter_data(list,df):\n if list in ([2, 1], [1, 2]):\n df1 = df[(df['percentage_gestemd_95_100'] == 1) | (df['percentage_gestemd_65_95'] == 1)]\n elif list in ([3, 1], [1, 3]):\n df1 = df[(df['percentage_gestemd_95_100'] == 1) | (df['percentage_gestemd_tot_65'] == 1)]\n elif list in ([3, 2], [2, 3]):\n df1 = df[(df['percentage_gestemd_65_95'] == 1) | (df['percentage_gestemd_tot_65'] == 1)]\n elif list == [1]:\n df1 = df[(df['percentage_gestemd_95_100'] == 1)]\n elif list == [2]:\n df1 = df[(df['percentage_gestemd_65_95'] == 1)]\n elif list == [3]:\n df1 = df[(df['percentage_gestemd_tot_65'] == 1)]\n else:\n df1 = df\n return df1\n\npartijen = ['Artikel 1','VVD', 'PvDA', 'PVV', 'SP', 'CDA', 'D66', 'CU',\n 'GL', 'SGP', 'PvdD', '50PLUS', 'OndernemersPartij',\n 'VNL (VoorNederland)', 'DENK', 'Forum voor Democratie',\n 'De Burger Beweging', 'Vrijzinnige Partij', 'GeenPeil', 'Piratenpartij']\npartij_options = []\nfor partij in partijen:\n partij_options.append({'label': str(partij), 'value': partij})\n\nradioitems = dbc.FormGroup([\n dbc.Label(\"Choose a party from the 2017 House of Representatives elections:\"),\n dbc.RadioItems(\n options=partij_options,\n value=\"Artikel 1\",\n id=\"radioitems-input\",\n ),\n ])\n\nradioitems_kleur = dbc.FormGroup([\n dbc.Label(\"Which color for the dots:\"),\n dbc.RadioItems(\n options=[{'label': 'Polling stations', 'value': 'kleur'},{'label': 'Largest party', 'value': 'grootste_partij'},\n {'label': 'Second largest party', 'value': 'tweede_grootste_partij'},\n {'label': 'Third largest party', 'value': 'derde_grootste_partij'}],\n value='kleur',\n id=\"radioitems-input-grote\",\n inline=True\n ),\n ])\n\nchecklist = dbc.FormGroup([\n dbc.Label(\"Choose the conditions for polling stations\"),\n dbc.Checklist(\n options=[\n {\"label\": 'percentage_gestemd_95_100', \"value\": 1},\n {\"label\": 'percentage_gestemd_65_95', \"value\": 2},\n {\"label\": 'percentage_gestemd_tot_65', \"value\": 3}\n ],\n value=[1,2,3],\n id=\"checklist-input\",\n ),\n ])\n\n# image_filename = '../assets/bij1_streep.png' # replace with your own image\n# encoded_image = base64.b64encode(open(image_filename, 'rb').read())\n\nlayout = 
html.Div([\n\n # dbc.Row([\n # dbc.Col([html.Img(src='data:image/png;base64,{}'.format(encoded_image.decode()), style={'height':'10%', 'width':'14%'})\n # ], width={\"size\": 8, \"offset\": 1})\n # ]),\n html.Br(),\n # html.P(id=\"radioitems-checklist-output\"),\n\n dbc.Row([\n dbc.Col([radioitems_kleur], width={\"size\": 8, \"offset\": 2})\n ]),\n\n dbc.Row([\n dbc.Col([radioitems], width={\"size\": 1, \"offset\": 1}),\n dbc.Col([\n html.Div([\n dcc.Graph(id='maps-plot'#,figure=fig_maps(df,hover_vars,token,size)\n )])\n ], width={\"size\": 8, \"offset\": 0}),\n dbc.Col([checklist]),\n ]),\n html.Br(),\n\n html.Div(\n daq.BooleanSwitch(\n id='switch_vk_flyer',\n on=True,\n labelPosition=\"top\",\n color='#FFFF00'\n )\n ),\n html.Div(id='boolean-switch-output2',\n style={'textAlign': 'center' # 'color': colors['text'],\n })\n])\n\n# app.layout = layout\n\n@app.callback(\n dash.dependencies.Output('boolean-switch-output2', 'children'),\n [dash.dependencies.Input('switch_vk_flyer', 'on')])\ndef update_output(on):\n if on:\n pick = \"Flyer areas ON\"\n else:\n pick = \"Flyer areas off\"\n return '{}'.format(pick)\n\n@app.callback(\n Output(\"radioitems-checklist-output\", \"children\"),\n [Input(\"checklist-input\", \"value\")]\n)\ndef on_form_change(radio_items_value):\n template = \"Radio button {} and type {}\".format(radio_items_value, type(radio_items_value))\n return template\n\n@app.callback(\n Output('maps-plot', \"figure\"),\n [Input(\"radioitems-input\", \"value\"),\n Input(\"radioitems-input-grote\", \"value\"),\n Input(\"checklist-input\",\"value\"),\n Input('switch_vk_flyer', 'on')]\n)\ndef on_form_change(radio_items_value,color_value,filter,tk_flyer):\n df_filtered = filter_data(filter, df)\n fig = fig_maps(df_filtered, token, radio_items_value, color_value)\n if tk_flyer:\n # fig = fig_campagne(df_flyer,token)\n mylayers = create_flyer_layers(df_buurten)\n fig.layout.update(mapbox_layers=mylayers)\n return fig\n else:\n return fig\n\n # return fig\n\n# if __name__ == '__main__':\n# app.run_server(debug=True)", "sub_path": "apps/mapping.py", "file_name": "mapping.py", "file_ext": "py", "file_size_in_byte": 8038, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "plotly.express.scatter_mapbox", "line_number": 30, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 30, "usage_type": "name"}, {"api_name": "plotly.express.scatter_mapbox", "line_number": 33, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 33, "usage_type": "name"}, {"api_name": "plotly.express.scatter_mapbox", "line_number": 42, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 42, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.FormGroup", "line_number": 105, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Label", "line_number": 106, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.RadioItems", "line_number": 107, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.FormGroup", "line_number": 114, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Label", "line_number": 115, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.RadioItems", "line_number": 116, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.FormGroup", "line_number": 126, 
"usage_type": "call"}, {"api_name": "dash_bootstrap_components.Label", "line_number": 127, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Checklist", "line_number": 128, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 142, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 148, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 151, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 152, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 155, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 156, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 157, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 158, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 159, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 162, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 164, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 166, "usage_type": "call"}, {"api_name": "dash_daq.BooleanSwitch", "line_number": 167, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 174, "usage_type": "call"}, {"api_name": "app.app.callback", "line_number": 181, "usage_type": "call"}, {"api_name": "app.app", "line_number": 181, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 182, "usage_type": "call"}, {"api_name": "dash.dependencies", "line_number": 182, "usage_type": "attribute"}, {"api_name": "dash.dependencies.Input", "line_number": 183, "usage_type": "call"}, {"api_name": "dash.dependencies", "line_number": 183, "usage_type": "attribute"}, {"api_name": "app.app.callback", "line_number": 191, "usage_type": "call"}, {"api_name": "app.app", "line_number": 191, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 192, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 193, "usage_type": "call"}, {"api_name": "app.app.callback", "line_number": 199, "usage_type": "call"}, {"api_name": "app.app", "line_number": 199, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 200, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 201, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 202, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 203, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 204, "usage_type": "call"}]} {"seq_id": "87952341", "text": "#! 
/usr/bin/env python\n# -*- coding: utf8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport tensorlayer as tl\n\nfrom nets import slim_nets_factory\nimport prepro\n\nimport os\nimport numpy as np\nimport cv2\nfrom glob import glob\n\ntf.app.flags.DEFINE_string('src_root', None, 'The path of src.')\ntf.app.flags.DEFINE_string('net_name', None, 'The name of the network.')\ntf.app.flags.DEFINE_string('im_path', None, 'The path of input images.')\ntf.app.flags.DEFINE_integer('num_classes', 1000, 'The number of classes.')\ntf.app.flags.DEFINE_string('test_tfrecords', None, 'The path of the test tfrecords file.')\ntf.app.flags.DEFINE_string('test_file_list', None, 'The path of the test image list.')\ntf.app.flags.DEFINE_string('preprocess_list', None, 'The path of the preprocess list.')\ntf.app.flags.DEFINE_integer('batch_size', 16, 'The batch size for training.')\ntf.app.flags.DEFINE_string('load_model', None, 'The load path of the model.')\n\nFLAGS = tf.app.flags.FLAGS\n\nclass file_manager(object):\n def __init__(self, file_path):\n self.path = file_path\n\n f = open(self.path, 'r')\n self.list = f.readlines()\n self.file_num = len(self.list)\n f.close()\n\n def output_filenum(self):\n return self.file_num\n \n def output_filelist(self):\n return self.list\n \ndef get_im_input_size():\n im_list = glob(FLAGS.im_path+'*.tif')\n im = cv2.imread(im_list[0])\n return im.shape[0], im.shape[1], im.shape[2]\n\ndef test_prepro(tensor, im_input_size, im_output_size):\n img_size = im_input_size\n img = tf.reshape(tensor, [img_size, img_size, 3])\n img = tf.cast(img, tf.float32)\n im_prep = prepro.preprocess(FLAGS.preprocess_list)\n img = im_prep.im_test_preprocess(img, im_output_size)\n return img\n\ndef inference(network_fn, x_crop, y_):\n network = network_fn(x_crop)\n y = network.outputs\n try:\n cost = tl.cost.cross_entropy(y, y_, name='cost')\n except:\n y = tf.squeeze(y, [1, 2], name='squeezed')\n cost = tl.cost.cross_entropy(y, y_, name='cost')\n #ce = tl.cost.cross_entropy(y, y_, name='cost')\n # L2 for the MLP, without this, the accuracy will be reduced by 15%.\n #L2 = tf.contrib.layers.l2_regularizer(0.004)(network.all_params[28])# + \\\n # tf.contrib.layers.l2_regularizer(0.004)(network.all_params[6])\n #cost = ce + L2\n\n # correct_prediction = tf.equal(tf.argmax(tf.nn.softmax(y), 1), y_)\n correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)\n acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n return cost, acc, network \n\ndef test_slim_net():\n test_network_fn = slim_nets_factory.get_network_fn(\n FLAGS.net_name,\n num_classes=FLAGS.num_classes,\n is_training=False,\n reuse=None)\n \n img_size = test_network_fn.default_image_size\n im_input_size, _, im_channel = get_im_input_size()\n\n t_image = tf.placeholder('float32', [1, img_size, img_size, im_channel], name='input_image')\n x_pre = test_prepro(t_image, im_input_size, img_size)\n y_ = tf.placeholder(tf.int32, shape=[1,])\n \n print('\\nTest Network Model:')\n cost_test, acc_test, network = inference(test_network_fn, x_pre, y_)\n print('')\n\n test_file_list = file_manager(FLAGS.test_file_list)\n test_num = test_file_list.output_filenum()\n test_list =test_file_list.output_filelist() \n\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)\n config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)\n with tf.Session(config=config) as sess:\n 
#sess.run(tf.global_variables_initializer())\n tl.layers.initialize_global_variables(sess)\n \n weight_file = FLAGS.load_model\n if not os.path.isfile(weight_file):\n raise ValueError('Model does not exist.')\n npz = np.load(weight_file)\n params = []\n for vals in sorted( npz.items() ):\n for val in vals[1]:\n #print(\" Loading %s\" % str(val.shape))\n params.append(val) \n tl.files.assign_params(sess, params, network)\n #network.print_params()\n \n batch_size = FLAGS.batch_size\n \n print('\\nInput Data Info:')\n print(' test_file_num:', test_num)\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n print('\\nTesting ...')\n step = 0\n test_loss, test_acc, n_batch = 0, 0, 0\n for _ in range(int(test_num/batch_size)):\n val = cv2.imread(os.path.join(FLAGS.im_path, test_list[step].split(' ')[0]+'.tif'))\n val = val.reshape(1, img_size, img_size, im_channel)\n l = int(test_list[step].split(' ')[1]);\n feed_dict = {t_image: val, y_: [l]}\n feed_dict.update(network.all_drop)\n err, ac, out = sess.run([cost_test, acc_test, network.outputs], feed_dict=feed_dict)\n test_loss += err; test_acc += ac; n_batch += 1; step += 1\n print(' Average test loss:', test_loss/ n_batch, '\\tAverage test acc:', test_acc/ n_batch)\n\n coord.request_stop()\n coord.join(threads)\n\ndef main(_):\n if FLAGS.net_name is None:\n raise ValueError('Please input the name of the network')\n if FLAGS.net_name.split('_')[0] == 'slim':\n test_slim_net()\n\nif __name__ == '__main__':\n tf.app.run() ", "sub_path": "src/single_label/train/test_slim_output.py", "file_name": "test_slim_output.py", "file_ext": "py", "file_size_in_byte": 5561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.app", "line_number": 29, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 48, "usage_type": "call"}, {"api_name": 
"tensorflow.reshape", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 54, "usage_type": "attribute"}, {"api_name": "prepro.preprocess", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorlayer.cost.cross_entropy", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorlayer.cost", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.squeeze", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorlayer.cost.cross_entropy", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorlayer.cost", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.equal", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 74, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 75, "usage_type": "attribute"}, {"api_name": "nets.slim_nets_factory.get_network_fn", "line_number": 80, "usage_type": "call"}, {"api_name": "nets.slim_nets_factory", "line_number": 80, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 91, "usage_type": "attribute"}, {"api_name": "tensorflow.GPUOptions", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorlayer.layers.initialize_global_variables", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorlayer.layers", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorlayer.files.assign_params", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorlayer.files", "line_number": 116, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Coordinator", "line_number": 124, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 124, "usage_type": "attribute"}, {"api_name": "tensorflow.train.start_queue_runners", "line_number": 125, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 125, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "tensorflow.app.run", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 150, "usage_type": "attribute"}]} {"seq_id": "294924798", "text": "\"\"\"Configuration\nThis module takes care of reading all the configurations from the project\ndirectory.\n\"\"\"\n\n\nimport importlib\nimport logging\nfrom os import listdir\nfrom os.path import isfile, join\n\n\nclass Configuration(object):\n \"\"\"Configuration\n This object reads all the configuration 
objects available at the project\n resources directory.\n \"\"\"\n\n def __init__(self, configuration_path):\n \"\"\"Constructor\n \"\"\"\n self.log = logging.getLogger(__name__)\n self._path = configuration_path\n self.list()\n self.read_configurations()\n\n def list(self):\n \"\"\"List Configuration Files\n \"\"\"\n all_files = [\n f for f in listdir(self._path)\n if isfile(join(self._path, f))\n ]\n self._files = {\n f.replace('.py', ''): self._path + f\n for f in all_files\n if f.startswith('configuration') and f.endswith('.py')\n }\n\n\n def read_configurations(self):\n \"\"\"Reads Configurations from files\n \"\"\"\n self._cfg_modules = {\n k: importlib.machinery.SourceFileLoader(k, v).load_module()\n for k, v in self._files.items()\n }\n self._configurations = {\n k: v.create()\n for k, v in self._cfg_modules.items()\n }\n", "sub_path": "src/readers/configuration.py", "file_name": "configuration.py", "file_ext": "py", "file_size_in_byte": 1339, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.getLogger", "line_number": 22, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "importlib.machinery.SourceFileLoader", "line_number": 45, "usage_type": "call"}, {"api_name": "importlib.machinery", "line_number": 45, "usage_type": "attribute"}]} {"seq_id": "374687358", "text": "'''\nCreated on 14 Aug 2016\n\n@author: anderson\n'''\nfrom django.conf.urls import patterns, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('web_critic.core.views',\n url(r'^$', 'home', name='home'),\n url(r'^contact/$', 'contact', name=\"contact\"),\n )\n", "sub_path": "web_critic/web_critic/core/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 354, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}]} {"seq_id": "234251200", "text": "import pygame\nimport sys\nimport random\n\ninitial = []\ncolor = (20 , 98 , 108)\nx= 0\ny = 0\n\ninput_str = raw_input('Enter co-ordinate 1:').split(' ')\nx = int(input_str[0])\ny = int(input_str[1])\ninitial.append([x , y])\n\ninput_str = raw_input('Enter co-ordinate 2:').split(' ')\nx = int(input_str[0])\ny = int(input_str[1])\ninitial.append([x , y])\n\ninput_str = raw_input('Enter co-ordinate 3:').split(' ') \nx = int(input_str[0])\ny = int(input_str[1])\ninitial.append([x , y])\n\nFPS = int(raw_input('Enter the framerate:'))\nclock = pygame.time.Clock()\npygame.init()\nDISPLAYSURF = pygame.display.set_mode((801 , 601))\npygame.display.set_caption('Chaos_Theory')\n\npix = pygame.PixelArray(DISPLAYSURF)\npix[initial[0][0]][initial[0][1]] = color\npix[initial[1][0]][initial[1][1]] = color\npix[initial[2][0]][initial[2][1]] = color\n\nx = random.randint(0 , 800)\ny = random.randint(0 , 600)\n\nwhile True:\n\tpix[x][y] = color\n\tdice = random.randint(0 , 5)\n\tdice = dice / 2\n\tx 
= (x + initial[dice][0]) / 2\n\ty = (y + initial[dice][1]) / 2\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tpygame.quit()\n\t\t\tsys.exit()\n\tpygame.display.update()\n\tclock.tick(FPS)\n", "sub_path": "Pygame_chaos.py", "file_name": "Pygame_chaos.py", "file_ext": "py", "file_size_in_byte": 1152, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pygame.time.Clock", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.PixelArray", "line_number": 31, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 37, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 49, "usage_type": "attribute"}]} {"seq_id": "230385615", "text": "\"\"\"\nexpm.py - a module for all things e^M\n\"\"\"\n\nfrom autograd import jacobian\nfrom autograd.extend import (defvjp as autograd_defvjp,\n primitive as autograd_primitive)\nimport numpy as np\nimport scipy.linalg as la\n\nfrom qoc.models.operationpolicy import OperationPolicy\n\n@autograd_primitive\ndef expm(matrix, operation_policy=OperationPolicy.CPU):\n \"\"\"\n Compute the matrix exponential of a matrix.\n Args:\n matrix :: numpy.ndarray - the matrix to exponentiate\n operation_policy :: qoc.OperationPolicy - what data type is\n used to perform the operation and with which method\n Returns:\n exp_matrix :: numpy.ndarray - the exponentiated matrix\n \"\"\"\n if operation_policy == OperationPolicy.CPU:\n exp_matrix = la.expm(matrix)\n else:\n pass\n\n return exp_matrix\n\n\ndef _expm_vjp(exp_matrix, matrix, operation_policy=OperationPolicy.CPU):\n \"\"\"\n Construct the left-multiplying vector jacobian product function\n for the matrix exponential.\n\n Intuition:\n `dfinal_dexpm` is the jacobian of `final` with respect to each element `expmij`\n of `exp_matrix`. `final` is the output of the first function in the\n backward differentiation series. It is also the output of the last\n function evaluated in the chain of functions that is being differentiated,\n i.e. the final cost function. The goal of `vjp_function` is to take\n `dfinal_dexpm` and yield `dfinal_dmatrix` which is the jacobian of\n `final` with respect to each element `mij` of `matrix`.\n To compute the frechet derivative of the matrix exponential with respect\n to each element `mij`, we use the approximation that\n dexpm_dmij = np.matmul(Eij, exp_matrix). 
Since Eij has a specific\n structure we don't need to do the full matrix multiplication and instead\n use some indexing tricks.\n\n Args:\n exp_matrix :: numpy.ndarray - the matrix exponential of matrix\n matrix :: numpy.ndarray - the matrix that was exponentiated\n operation_policy :: qoc.OperationPolicy - what data type is\n used to perform the operation and with which method\n\n Returns:\n vjp_function :: numpy.ndarray -> numpy.ndarray - the function that takes\n the jacobian of the final function with respect to `exp_matrix`\n to the jacobian of the final function with respect to `matrix`\n \"\"\"\n if operation_policy == OperationPolicy.CPU:\n matrix_size = matrix.shape[0]\n \n def _expm_vjp_cpu(dfinal_dexpm):\n dfinal_dmatrix = np.zeros((matrix_size, matrix_size), dtype=np.complex128)\n\n # Compute a first order approximation of the frechet derivative of the matrix\n # exponential in every unit direction Eij.\n for i in range(matrix_size):\n for j in range(matrix_size):\n dexpm_dmij_rowi = exp_matrix[j,:]\n dfinal_dmatrix[i, j] = np.sum(np.multiply(dfinal_dexpm[i, :], dexpm_dmij_rowi))\n #ENDFOR\n #ENDFOR\n\n return dfinal_dmatrix\n #ENDDEF\n\n vjp_function = _expm_vjp_cpu\n else:\n pass\n\n return vjp_function\n\n\nautograd_defvjp(expm, _expm_vjp)\n\n\n### MODULE TESTS ###\n\n_BIG = 30\n\ndef _get_skew_hermitian_matrix(matrix_size):\n \"\"\"\n Args:\n matrix_size :: int - square matrix size\n\n Returns:\n skew_hermitian_matrix :: numpy.ndarray - a skew-hermitian matrix\n of `matrix_size`\n \"\"\"\n matrix = (np.random.rand(matrix_size, matrix_size)\n + 1j * np.random.rand(matrix_size, matrix_size))\n \n return np.divide(matrix - conjugate_transpose(matrix), 2)\n\ndef _tests():\n \"\"\"\n Run tests on the module.\n Args: none\n Returns: none\n \"\"\"\n\n # Test that the end-to-end gradient of the matrix exponential is working.\n m = np.array([[1., 0.],\n [0., 1.]])\n m_len = m.shape[0]\n exp_m = np.exp(m)\n dexpm_dm_expected = np.zeros((m_len, m_len, m_len, m_len), dtype=m.dtype)\n dexpm_dm_expected[0, 0, 0, 0] = exp_m[0, 0]\n dexpm_dm_expected[0, 1, 0, 1] = exp_m[0, 0]\n dexpm_dm_expected[1, 0, 1, 0] = exp_m[1, 1]\n dexpm_dm_expected[1, 1, 1, 1] = exp_m[1, 1]\n \n dexpm_dm = jacobian(expm, 0)(m)\n\n assert(np.allclose(dexpm_dm, dexpm_dm_expected))\n\n\nif __name__ == \"__main__\":\n _tests()\n", "sub_path": "qoc/standard/functions/expm.py", "file_name": "expm.py", "file_ext": "py", "file_size_in_byte": 4307, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "qoc.models.operationpolicy.OperationPolicy.CPU", "line_number": 14, "usage_type": "attribute"}, {"api_name": "qoc.models.operationpolicy.OperationPolicy", "line_number": 14, "usage_type": "name"}, {"api_name": "qoc.models.operationpolicy.OperationPolicy.CPU", "line_number": 24, "usage_type": "attribute"}, {"api_name": "qoc.models.operationpolicy.OperationPolicy", "line_number": 24, "usage_type": "name"}, {"api_name": "scipy.linalg.expm", "line_number": 25, "usage_type": "call"}, {"api_name": "scipy.linalg", "line_number": 25, "usage_type": "name"}, {"api_name": "autograd.extend.primitive", "line_number": 13, "usage_type": "name"}, {"api_name": "qoc.models.operationpolicy.OperationPolicy.CPU", "line_number": 32, "usage_type": "attribute"}, {"api_name": "qoc.models.operationpolicy.OperationPolicy", "line_number": 32, "usage_type": "name"}, {"api_name": "qoc.models.operationpolicy.OperationPolicy.CPU", "line_number": 62, "usage_type": "attribute"}, 
{"api_name": "qoc.models.operationpolicy.OperationPolicy", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.complex128", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 73, "usage_type": "call"}, {"api_name": "autograd.extend.defvjp", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 103, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.divide", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 120, "usage_type": "call"}, {"api_name": "autograd.jacobian", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 128, "usage_type": "call"}]} {"seq_id": "436070446", "text": "import time\nimport requests\nimport telebot\nimport praw\nfrom prawcore import NotFound\nfrom telebot import types\nfrom telebot.types import ForceReply\nfrom datetime import datetime\nimport psycopg2\nimport threading\n\n# ะฟะพะดะบะปัŽั‡ะฐะตะผ ะ‘ะ”\ncon = psycopg2.connect(\n database=\"\",\n user=\"\",\n password=\"\",\n host=\"\",\n port=\"\"\n)\ncur = con.cursor()\n# ะธะผั ั‚ะฐะฑะปะธั†ั‹ ะฒ ะฑะด\ntable = 'alfa'\ntable_id = 'alfa_id'\n# ั‚ะพะบะตะฝ ะฑะพั‚ะฐ\nbot = telebot.TeleBot('')\n# ะฟะพะดะปัŽั‡ะตะฝะธะต ั€ะตะดะดะธั‚ะฐ\nreddit = praw.Reddit(client_id='',\n client_secret='',\n user_agent='')\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n print(message)\n cur.execute(\n 'INSERT INTO {table} (id, username, datalast, subscription, datareg) VALUES ({id}, \\'{username}\\', '\n '\\'{datalast}\\', \\'{subscription}\\', \\'{datareg}\\');'.format(\n table=table,\n id=message.chat.id,\n datalast=str(datetime.utcfromtimestamp(int(message.date)).strftime('%Y-%m-%d %H:%M:%S')),\n datareg=str(datetime.utcfromtimestamp(int(message.date)).strftime('%Y-%m-%d %H:%M:%S')),\n username=message.from_user.username,\n subscription=''))\n con.commit()\n print('Chat Id ' + str(message.chat.id))\n bot.send_message(message.chat.id, 'ะŸั€ะธะฒะตั‚\\n'\n 'ะญั‚ะพั‚ ะฑะพั‚ ะฟั€ะตะดะฝะฐะทั‡ะตะฝ ะดะปั ะฟะพะปัƒั‡ะตะฝะธั ะฟะพัั‚ะพะฒ ั ะฟะปะพั‰ะฐะดะบะธ Reddit',\n reply_markup=keyboard_start())\n bot.register_next_step_handler(message, post(message))\n\n\ndef post(message):\n while True:\n cur.execute(\"\"\"SELECT id, username, subscription from {table} WHERE subscription NOT IN ('')\"\"\".format(\n table=table\n ))\n subred = cur.fetchall()\n cur.execute(\"\"\"SELECT id_set from {table}\"\"\".format(\n table=table_id\n ))\n con.commit()\n id_set = cur.fetchall()\n print(id_set)\n for item in subred:\n if item[0] == message.chat.id:\n for submission in reddit.subreddit(item[2]).hot(limit=5):\n if submission.id not in id_set:\n print(\"ะขะฐะบะพะณะพ ะฟะพัั‚ะฐ ะตั‰ะต ะฝะต ะฑั‹ะปะพ\")\n print(\"ID: \" + submission.id)\n print('NAME: ' + item[2])\n print('TITLE: ' + submission.title)\n print('--------------------------------------')\n # submission.title_finaly = '' + submission.title + ''\n # if not submission.url:\n # if not submission.selftext:\n # print('1_1')\n # 
bot.send_message(message.chat.id,\n # submission.title_finaly + '\\n' + '#' + str(\n # submission.subreddit), parse_mode='HTML')\n # else:\n # print('1_2')\n # bot.send_message(message.chat.id,\n # submission.title_finaly + '\\n\\n' + submission.selftext + '\\n' + '#' + str(\n # submission.subreddit), parse_mode='HTML')\n # elif is_url_image(submission.url) and submission.url[:17] == 'https://i.redd.it':\n # if not submission.selftext:\n # print('3_1')\n # bot.send_photo(message.chat.id,\n # caption=submission.title_finaly + '\\n' + '#' + str(\n # submission.subreddit), photo=submission.url, parse_mode='HTML')\n # else:\n # print('3_2')\n # bot.send_photo(message.chat.id,\n # caption=submission.title_finaly + '\\n\\n' + submission.selftext + '\\n' + '#' + str(\n # submission.subreddit), photo=submission.url, parse_mode='HTML')\n # elif submission.url[:5] == 'https':\n # if not submission.selftext:\n # print('2_1')\n # bot.send_message(message.chat.id,\n # submission.title_finaly + '\\n\\n' + submission.url + '\\n' + '#' + str(\n # submission.subreddit), parse_mode='HTML')\n # else:\n # print('2_2')\n # bot.send_message(message.chat.id,\n # submission.title_finaly + '\\n\\n' + submission.selftext + '\\n\\n' + submission.url + '\\n' + '#' + str(\n # submission.subreddit), parse_mode='HTML')\n # else:\n # if not submission.selftext:\n # print('4_1')\n # bot.send_message(message.chat.id, submission.title_finaly + '\\n' '#' + str(\n # submission.subreddit), parse_mode='HTML', reply_markup=keyboard_start())\n # else:\n # print('4_2')\n # print('sfsfsf')\n # bot.send_message(message.chat.id,\n # submission.title_finaly + '\\n\\n' + submission.selftext + '\\n' + '#' + str(\n # submission.subreddit), parse_mode='HTML')\n cur.execute(\n 'INSERT INTO {table} (id_set) VALUES (\\'{id}\\');'.format(table=table_id, id=submission.id))\n con.commit()\n time.sleep(30)\n\n\ndef post_reddit(message, submission):\n if not submission.url:\n if not submission.selftext:\n print('1_1')\n bot.send_message(message.chat.id,\n submission.title_finaly + '\\n' + '#' + str(\n submission.subreddit), parse_mode='HTML')\n else:\n print('1_2')\n bot.send_message(message.chat.id,\n submission.title_finaly + '\\n\\n' + submission.selftext + '\\n' + '#' + str(\n submission.subreddit), parse_mode='HTML')\n elif is_url_image(submission.url) and submission.url[:17] == 'https://i.redd.it':\n if not submission.selftext:\n print('3_1')\n bot.send_photo(message.chat.id,\n caption=submission.title_finaly + '\\n' + '#' + str(\n submission.subreddit), photo=submission.url, parse_mode='HTML')\n else:\n print('3_2')\n bot.send_photo(message.chat.id,\n caption=submission.title_finaly + '\\n\\n' + submission.selftext + '\\n' + '#' + str(\n submission.subreddit), photo=submission.url, parse_mode='HTML')\n elif submission.url[:5] == 'https':\n if not submission.selftext:\n print('2_1')\n bot.send_message(message.chat.id,\n submission.title_finaly + '\\n\\n' + submission.url + '\\n' + '#' + str(\n submission.subreddit), parse_mode='HTML')\n else:\n print('2_2')\n bot.send_message(message.chat.id,\n submission.title_finaly + '\\n\\n' + submission.selftext + '\\n\\n' + submission.url + '\\n' + '#' + str(\n submission.subreddit), parse_mode='HTML')\n else:\n if not submission.selftext:\n print('4_1')\n bot.send_message(message.chat.id, submission.title_finaly + '\\n' '#' + str(\n submission.subreddit), parse_mode='HTML', reply_markup=keyboard_start())\n else:\n print('4_2')\n print('sfsfsf')\n bot.send_message(message.chat.id,\n 
submission.title_finaly + '\\n\\n' + submission.selftext + '\\n' + '#' + str(\n submission.subreddit), parse_mode='HTML')\n elif is_url_image(submission.url) and submission.url[:17] == 'https://i.redd.it':\n if not submission.selftext:\n print('3_1')\n bot.send_photo(message.chat.id,\n caption=submission.title_finaly + '\\n' + '#' + str(\n submission.subreddit), photo=submission.url, parse_mode='HTML')\n else:\n print('3_2')\n bot.send_photo(message.chat.id,\n caption=submission.title_finaly + '\\n\\n' + submission.selftext + '\\n' + '#' + str(\n submission.subreddit), photo=submission.url, parse_mode='HTML')\n elif submission.url[:5] == 'https':\n if not submission.selftext:\n print('2_1')\n bot.send_message(message.chat.id,\n submission.title_finaly + '\\n\\n' + submission.url + '\\n' + '#' + str(\n submission.subreddit), parse_mode='HTML')\n else:\n print('2_2')\n bot.send_message(message.chat.id,\n submission.title_finaly + '\\n\\n' + submission.selftext + '\\n\\n' + submission.url + '\\n' + '#' + str(\n submission.subreddit), parse_mode='HTML')\n else:\n if not submission.selftext:\n print('4_1')\n bot.send_message(message.chat.id, submission.title_finaly + '\\n' '#' + str(\n submission.subreddit), parse_mode='HTML', reply_markup=keyboard_start())\n else:\n print('4_2')\n print('sfsfsf')\n bot.send_message(message.chat.id,\n submission.title_finaly + '\\n\\n' + submission.selftext + '\\n' + '#' + str(\n submission.subreddit), parse_mode='HTML')\n\n\ndef keyboard_start():\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n button_subs = types.KeyboardButton(text=\"Manage subscriptions\")\n button_get = types.KeyboardButton(text=\"Get a post from a link\")\n button_help = types.KeyboardButton(text=\"Other functions\")\n keyboard.add(button_subs, button_get, button_help)\n return keyboard\n\n\n@bot.message_handler(content_types=['text'])\ndef send(message):\n if message.text == 'Manage subscriptions':\n keyboard_subscription(message)\n elif message.text == 'Get a post from a link':\n bot.send_message(message.chat.id, 'Enter a link to the post', reply_markup=ForceReply())\n bot.register_next_step_handler(message, get_post)\n elif message.text == 'Other functions':\n bot.send_message(message.chat.id, 'Available commands:\\n'\n '/list - the list of your subscriptions\\n')\n elif message.text == '/list':\n subscription_list_text_post(message)\n\n\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_worker(call):\n if call.data == 'subscription_list':\n bot.answer_callback_query(call.id)\n subscription_list_text_post(call.message)\n bot.delete_message(call.message.chat.id, call.message.message_id)\n elif call.data == 'subscription_add':\n bot.answer_callback_query(call.id)\n bot.send_message(call.message.chat.id, 'Add a new subscription', reply_markup=ForceReply())\n bot.register_next_step_handler(call.message, add_subscription)\n bot.delete_message(call.message.chat.id, call.message.message_id)\n elif call.data == 'subscription_remove':\n subscription_list_text_post(call.message)\n bot.answer_callback_query(call.id)\n cur = con.cursor()\n cur.execute(\n \"\"\"SELECT id, username, subscription from {table} WHERE subscription NOT IN ('')\"\"\".format(table=table))\n subscription_list_array = cur.fetchall()\n index = False\n for item in subscription_list_array:\n if item[0] == call.message.chat.id and item:\n index = True\n else:\n index = True\n if index:\n bot.send_message(call.message.chat.id,\n 'Which subscription should we remove?\\nTap the subscription name in the list above and it '\n 'will be copied to '\n 'the clipboard',\n reply_markup=ForceReply())\n break\n else:\n bot.send_message(call.message.chat.id, 'Your subscription list is empty')\n subscription_list_text_post(call.message)\n bot.register_next_step_handler(call.message, remove_subscription)\n bot.delete_message(call.message.chat.id, call.message.message_id)\n\n\ndef add_subscription(message):\n # check whether this subscription is already in the list\n if sub_exists_remove(message.text):\n bot.send_message(message.chat.id, 'This subscription already exists')\n subscription_list_text_post(message)\n else:\n # check whether the page exists on Reddit\n bot.send_message(message.chat.id, 'Checking whether the page exists...')\n time.sleep(2)\n if sub_exists_add(message.text):\n # add the data to the DB\n cur.execute(\n 'INSERT INTO {table} (id, username, datalast, subscription) VALUES ({id}, \\'{username}\\', '\n 
'\\'{datalast}\\', '\n '\\'{subscription}\\');'.format(\n table=table,\n id=message.chat.id,\n datalast=str(datetime.utcfromtimestamp(int(message.date)).strftime('%Y-%m-%d %H:%M:%S')),\n username=message.from_user.username,\n subscription=message.text))\n con.commit()\n bot.send_message(message.chat.id, 'Subscription created successfully')\n time.sleep(1)\n subscription_list_text_post(message)\n else:\n bot.send_message(message.chat.id, 'This page does not exist on Reddit\\n'\n 'Please check that the name is correct\\n'\n 'A possible mistake is including the r/ prefix')\n\n\n# unsubscribe + check\ndef remove_subscription(message):\n # check whether this subscription is in the list\n bot.send_message(message.chat.id, 'Checking whether this page is among your subscriptions...')\n time.sleep(2)\n bot.delete_message(message.chat.id, message.message_id)\n if sub_exists_remove(message.text):\n time.sleep(1)\n # delete the subscription from the DB\n cur = con.cursor()\n cur.execute(\"\"\"DELETE from {table} WHERE subscription = \\'{subscription}\\' and id = {id}\"\"\".format(\n table=table,\n subscription=message.text,\n id=message.chat.id))\n con.commit()\n bot.send_message(message.chat.id, 'You have successfully unsubscribed')\n time.sleep(1)\n subscription_list_text_post(message)\n else:\n bot.send_message(message.chat.id, 'This subscription was not found')\n\n\n# message with the user's subscriptions\ndef subscription_list_text_post(message):\n subscription_list_text = ''\n cur = con.cursor()\n index = False\n cur.execute(\"\"\"SELECT id, username, subscription from {table} WHERE subscription NOT IN ('')\"\"\".format(\n table=table\n ))\n subscription_list_array = cur.fetchall()\n for item in subscription_list_array:\n if item[0] == message.chat.id and item:\n subscription_list_text += '• ' + '<b>
' + item[2] + '
</b>' + '\\n'\n index = True\n con.commit()\n if index:\n bot.send_message(message.chat.id, 'Current subscriptions:\\n{text}'.format(text=subscription_list_text),\n parse_mode='HTML', reply_markup=keyboard_start())\n else:\n bot.send_message(message.chat.id, 'No subscriptions', reply_markup=keyboard_start())\n\n\ndef keyboard_subscription(message):\n keyboard = types.InlineKeyboardMarkup()\n key_subscription_list = types.InlineKeyboardButton(text='Your subscriptions', callback_data='subscription_list')\n keyboard.add(key_subscription_list)\n key_subscription_add = types.InlineKeyboardButton(text='Add a new subscription',\n callback_data='subscription_add')\n keyboard.add(key_subscription_add)\n key_subscription_remove = types.InlineKeyboardButton(text='Cancel a subscription',\n callback_data='subscription_remove')\n keyboard.add(key_subscription_remove)\n key_subreddit = types.InlineKeyboardButton(text='Reddit pages', url='https://www.reddit.com/subreddits/')\n keyboard.add(key_subreddit)\n bot.send_message(message.chat.id, text='Manage subscriptions', reply_markup=keyboard)\n\n\ndef get_post(message):\n try:\n link = message.text\n # fetch a post from Reddit via a link\n collection = reddit.subreddit('SUBREDDIT').collections(permalink=link)\n submission = reddit.submission(collection)\n submission.title_finaly = '<b>' + submission.title + '</b>'\n if not submission.url:\n if not submission.selftext:\n print('1_1')\n bot.send_message(message.chat.id,\n submission.title_finaly + '\\n' + '#' + str(\n submission.subreddit), parse_mode='HTML', reply_markup=keyboard_start())\n else:\n print('1_2')\n bot.send_message(message.chat.id,\n submission.title_finaly + '\\n\\n' + submission.selftext + '\\n' + '#' + str(\n submission.subreddit), parse_mode='HTML', reply_markup=keyboard_start())\n elif is_url_image(submission.url) and submission.url[:17] == 'https://i.redd.it':\n if not submission.selftext:\n print('3_1')\n bot.send_photo(message.chat.id,\n caption=submission.title_finaly + '\\n' + '#' + str(\n submission.subreddit), photo=submission.url, parse_mode='HTML',\n reply_markup=keyboard_start())\n else:\n print('3_2')\n bot.send_photo(message.chat.id,\n caption=submission.title_finaly + '\\n\\n' + submission.selftext + '\\n' + '#' + str(\n submission.subreddit), photo=submission.url, parse_mode='HTML',\n reply_markup=keyboard_start())\n elif submission.url[:5] == 'https':\n if not submission.selftext:\n print('2_1')\n bot.send_message(message.chat.id,\n submission.title_finaly + '\\n\\n' + submission.url + '\\n' + '#' + str(\n submission.subreddit), parse_mode='HTML', reply_markup=keyboard_start())\n else:\n print('2_2')\n bot.send_message(message.chat.id,\n submission.title_finaly + '\\n\\n' + submission.selftext + '\\n\\n' + submission.url + '\\n' + '#' + str(\n submission.subreddit), parse_mode='HTML', reply_markup=keyboard_start())\n else:\n if not submission.selftext:\n print('4_1')\n bot.send_message(message.chat.id, submission.title_finaly + '\\n' '#' + str(\n submission.subreddit), parse_mode='HTML', reply_markup=keyboard_start())\n else:\n print('4_2')\n print('sfsfsf')\n bot.send_message(message.chat.id,\n submission.title_finaly + '\\n\\n' + submission.selftext + '\\n' + '#' + str(\n submission.subreddit), parse_mode='HTML', reply_markup=keyboard_start())\n except IndexError:\n bot.send_message(message.chat.id, 'Such a post does not 
exist')\n\n\ndef sub_exists_add(sub):\n exists = True\n try:\n reddit.subreddits.search_by_name(sub, exact=True)\n except NotFound:\n exists = False\n return exists\n\n\n# check for presence in the list\ndef sub_exists_remove(sub):\n cur = con.cursor()\n cur.execute(\"\"\"SELECT id, username, subscription from {table}\"\"\".format(\n table=table))\n subscription_list_array = cur.fetchall()\n for item in subscription_list_array:\n if sub == item[2]:\n return True\n\n\n# check whether the link is an image\ndef is_url_image(image_url):\n image_formats = (\"image/png\", \"image/jpeg\", \"image/jpg\")\n r = requests.head(image_url)\n time.sleep(.25)\n if r.headers[\"content-type\"] in image_formats:\n return True\n return False\n\n\ndef log(message):\n print(\"\\n ------\")\n from datetime import datetime\n print(datetime.now())\n print(\"Message from {0} {1}. (id = {2}) \\nText = {3}\".format(message.from_user.first_name,\n message.from_user.last_name,\n str(message.from_user.id), message.text))\n\n\nbot.polling(none_stop=True, interval=0)\n", "sub_path": "reddit_bot.py", "file_name": "reddit_bot.py", "file_ext": "py", "file_size_in_byte": 20880, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "psycopg2.connect", "line_number": 13, "usage_type": "call"}, {"api_name": "telebot.TeleBot", "line_number": 25, "usage_type": "call"}, {"api_name": "praw.Reddit", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "name"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 121, "usage_type": "call"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 172, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 172, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 173, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 173, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 174, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 174, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 175, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 175, "usage_type": "name"}, {"api_name": "telebot.types.ForceReply", "line_number": 185, "usage_type": "call"}, {"api_name": "telebot.types.ForceReply", "line_number": 202, "usage_type": "call"}, {"api_name": "telebot.types.ForceReply", "line_number": 223, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 240, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 249, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 249, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 254, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 266, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 269, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 278, "usage_type": "call"}, {"api_name": "telebot.types.InlineKeyboardMarkup", "line_number": 306, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 
306, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 307, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 307, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 309, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 309, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 312, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 312, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 315, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 315, "usage_type": "name"}, {"api_name": "prawcore.NotFound", "line_number": 381, "usage_type": "name"}, {"api_name": "requests.head", "line_number": 400, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 401, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 410, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 410, "usage_type": "name"}]} {"seq_id": "333105859", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom ansible.module_utils.jira_common import JiraModuleBase\n\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = \"\"\"\nmodule: jira_project_role_membership\nversion_added: \"0.0.1\"\nshort_description: manage a project role membership in Jira\ndescription:\n - Manage a project role membership in Jira\n\nextends_documentation_fragment:\n - jira_modules_common\n\noptions:\n project_key:\n required: false\n description:\n - The Jira key of the project.\n - Cannot be updated.\n - This parameter is mutually exclusive with C(project_id)\n\n project_id:\n required: false\n description:\n - The ID of the project.\n - Cannot be updated.\n - This parameter is mutually exclusive with C(project_key)\n\n role_name:\n required: false\n description:\n - The name of the Jira role\n - Cannot be updated.\n - This parameter is mutually exclusive with C(role_id)\n\n role_id:\n required: false\n description:\n - The ID of the Jira role\n - Cannot be updated.\n - This parameter is mutually exclusive with C(role_name)\n\n users:\n required: false\n description:\n - Users to belong to the role\n - Can be updated.\n\n groups:\n required: false\n description:\n - Groups to belong to the role\n - Can be updated.\n\nauthor: \"Joe Topjian \"\n\"\"\"\n\nRETURN = \"\"\"\njira_project_role_membership:\n type: dict\n description:\n - A Jira project role membership.\n - See\n https://docs.atlassian.com/software/jira/docs/api/REST/8.6.0/#api/2/project/{projectIdOrKey}/role-getProjectRole\n for the schema.\n returned: When a Jira project role membership was detected.\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: Ensure project role membership exists\n jira_project_role_membership:\n project_key: PRJ1\n role_name: Administrators\n users:\n - admin\n\"\"\"\n\nROLE_REST_ENDPOINT = \"rest/api/2/role\"\nREST_ENDPOINT = \"rest/api/2/project/%s/role/%s\"\n\n\nclass JiraProjectCategory(JiraModuleBase):\n \"\"\"Utility class to manage a Jira project role membership\"\"\"\n\n def __init__(self):\n self.module_args = dict(\n project_name=dict(\n required=False,\n _jira_field='project_name',\n _jira_update=False),\n\n project_key=dict(\n required=False,\n _jira_field='project_key',\n _jira_update=False),\n\n role_name=dict(\n required=False,\n _jira_field='role_name',\n _jira_update=False),\n\n role_id=dict(\n 
class JiraProjectRoleMembership(JiraModuleBase):\n    \"\"\"Utility class to manage a Jira project role membership\"\"\"\n\n    def __init__(self):\n        self.module_args = dict(\n            project_id=dict(\n                required=False,\n                _jira_field='project_id',\n                _jira_update=False),\n\n            project_key=dict(\n                required=False,\n                _jira_field='project_key',\n                _jira_update=False),\n\n            role_name=dict(\n                required=False,\n                _jira_field='role_name',\n                _jira_update=False),\n\n            role_id=dict(\n                required=False,\n                _jira_field='role_id',\n                _jira_update=False),\n\n            users=dict(\n                type='list',\n                required=False,\n                _jira_field='users',\n                _jira_update=True),\n\n            groups=dict(\n                type='list',\n                required=False,\n                _jira_field='groups',\n                _jira_update=True),\n\n            state=dict(\n                required=False,\n                default='present',\n                choices=['absent', 'present']),\n        )\n\n        self.results = dict(\n            jira_project_role_membership=dict(),\n            changed=False,\n        )\n\n        super(JiraProjectRoleMembership, self).__init__(\n            derived_arg_spec=self.module_args,\n            rest_endpoint=REST_ENDPOINT,\n            mutually_exclusive=[\n                ['project_id', 'project_key'], ['role_id', 'role_name']],\n            required_one_of=[\n                ['project_id', 'project_key'], ['role_id', 'role_name']],\n        )\n\n    def find_role_id(self):\n        self.rest_endpoint = ROLE_REST_ENDPOINT\n        roles = self.get()\n        name = self.param('role_name')\n\n        id = None\n        for role in roles:\n            if role[\"name\"] == name:\n                id = role[\"id\"]\n\n        return id\n\n    def get_users_and_groups(self):\n        users = []\n        groups = []\n        actors = self.get()\n        for actor in actors['actors']:\n            if actor['type'] == 'atlassian-user-role-actor':\n                users.append(actor['name'])\n            if actor['type'] == 'atlassian-group-role-actor':\n                groups.append(actor['name'])\n        return (users, groups)\n\n    def exec_module(self, **kwargs):\n        action = None\n        is_install_mode = self.param('state') == 'present'\n\n        try:\n            prj = self.param('project_key')\n            if prj is None:\n                prj = self.param('project_id')\n\n            role_id = self.param('role_id')\n            if role_id is None:\n                role_id = self.find_role_id()\n                if role_id is None:\n                    self.fail(\"Unable to determine Jira role id: %s\",\n                              self.param('role_name'))\n\n            self.rest_endpoint = REST_ENDPOINT % (prj, role_id)\n\n            users = self.param('users') or []\n            groups = self.param('groups') or []\n            (_users, _groups) = self.get_users_and_groups()\n\n            if not is_install_mode:\n                action = 'deleted'\n\n                if len(users) == 0 and len(_users) == 0 and \\\n                   len(groups) == 0 and len(_groups) == 0:\n                    action = None\n            else:\n                if (set(users) != set(_users)) or \\\n                   (set(groups) != set(_groups)):\n                    action = 'updated'\n\n            self.results['jira_project_role_membership_action'] = action\n\n            if len(users) == 0 and len(groups) == 0:\n                del(self.results['jira_project_role_membership'])\n\n            if action is not None:\n                self.results['changed'] = True\n\n                if self.check_mode:\n                    return\n\n                if action == 'updated' or action == 'deleted':\n                    data = dict()\n                    key = 'categorisedActors'\n                    data[key] = dict()\n\n                    gKey = 'atlassian-group-role-actor'\n                    uKey = 'atlassian-user-role-actor'\n\n                    if action == 'deleted':\n                        data[key][gKey] = []\n                        data[key][uKey] = []\n\n                    if action == 'updated':\n                        if len(groups) > 0:\n                            data[key][gKey] = groups\n\n                        if len(users) > 0:\n                            data[key][uKey] = users\n\n                    self.put(data)\n\n                    (_users, _groups) = self.get_users_and_groups()\n                    if len(_users) == 0 and len(_groups) == 0:\n                        del(self.results['jira_project_role_membership'])\n                    else:\n                        v = {\n                            'users': _users,\n                            'groups': _groups,\n                        }\n\n                        self.results['jira_project_role_membership'] = v\n\n        except Exception as e:\n            self.fail(msg=str(e))\n\n\nif __name__ == '__main__':\n    JiraProjectRoleMembership()\n", "sub_path": "library/jira_project_role_membership.py", "file_name": "jira_project_role_membership.py", "file_ext": "py", "file_size_in_byte": 7037, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "ansible.module_utils.jira_common.JiraModuleBase", "line_number": 92, "usage_type": "name"}]} {"seq_id": "352212359", "text": "# coding=utf-8\n\"\"\"\nModule responsible for the definition of functions that convert Raw units (available in the\nacquisition files returned by OpenSignals) and sample units to physical units like mV, A, ºC,\ns,..., according to the sensor under analysis.\n\nAvailable Functions\n-------------------\n[Public]\n\nraw_to_phy\n    Function that converts each sample value in raw units to a physical unit taking into account\n    the respective transfer function for the sensor and device specified as an input.\ngenerate_time\n    Considering the acquisition sampling rate and the number of samples that compose\n    the signal, this function will return a time axis in seconds.\n\nObservations/Comments\n---------------------\nNone\n\n/\\\n\"\"\"\n\nimport numpy\nfrom .aux_functions import _is_a_url, _generate_download_google_link, _calc_time_precision, _truncate_time, \\\n    _truncate_value, _calc_value_precision\nfrom .load import load\nimport math\nfrom scipy.constants import g\n\n\n
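# Added usage sketch (hypothetical values, not from the original module): a 16-bit ECG\n# channel from a biosignalsplux device could be converted and given a time axis with\n#   ecg_mv = raw_to_phy(\"ECG\", \"biosignalsplux\", samples, 16, option=\"mV\")\n#   t = generate_time(ecg_mv, sample_rate=1000)\n# where samples is a list of raw ADC values.\n\n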
def raw_to_phy(sensor, device, raw_signal, resolution, option, truncate=True):\n    \"\"\"\n    -----\n    Brief\n    -----\n    Function for converting raw units to physical units.\n\n    -----------\n    Description\n    -----------\n    Each sensor and device has a specific transfer function that models the inputs to outputs. This transfer function\n    is, thus, used in order to convert the raw units that are measured to physical units that originated the data.\n\n    This function makes the conversion of raw units to physical units, using the information of sensor and device.\n\n    ----------\n    Parameters\n    ----------\n    sensor : str\n        Sensor label:\n        - \"ECG\"\n        - \"EMG\"\n        - \"TEMP\"\n        - \"BVP\"\n        - \"SpO2.HEAD\"\n        - \"SpO2.FING\"\n        - \"SpO2.ARM\"\n        - \"EEG\"\n        - \"EDA\"\n\n    device : str\n        PLUX device label:\n        - \"bioplux\"\n        - \"bioplux_exp\"\n        - \"biosignalsplux\"\n        - \"rachimeter\"\n        - \"channeller\"\n        - \"swifter\"\n        - \"ddme_openbanplux\"\n\n    raw_signal : list\n        Raw signal samples.\n\n    resolution : int\n        Resolution selected during acquisition.\n\n    option : str (optional)\n        Output units (only available in certain sensors):\n        - \"mV\"\n        - \"V\"\n        - \"C\" (Celsius)\n        - \"K\" (Kelvin)\n        - \"Ohm\"\n        - \"A\"\n        - \"uA\"\n        - \"g\"\n        - \"m/s^2\"\n        (When not applicable, a warning message is raised).\n\n    truncate: boolean (optional)\n        indicates whether the resulting values should be truncated to the precision actually\n        achievable by the device (its least significant bit step). Defaults to True.\n\n    Returns\n    -------\n    out : list\n        Signal in the new scale.\n    \"\"\"\n\n    raw_signal = numpy.array(raw_signal)\n\n    # Check if resolution has the correct data format.\n    if not isinstance(resolution, int) and not isinstance(resolution, numpy.int32):\n        raise RuntimeError(\"The specified resolution needs to be an integer.\")\n\n    out = None\n    if sensor == \"TEMP\":\n        vcc = 3.0\n        available_dev_1 = [\"bioplux\", \"bioplux_exp\", \"biosignalsplux\", \"rachimeter\", \"channeller\",\n                           \"swifter\", \"ddme_openbanplux\"]\n        available_dev_2 = [\"bitalino\", \"bitalino_rev\", \"bitalino_riot\"]\n        if option == \"Ohm\":\n            if device in available_dev_1:\n                out = (1e4 * raw_signal) / (2**resolution - raw_signal)\n            else:\n                raise RuntimeError(\"The output specified unit does not have a defined transfer \"\n                                   \"function for the used device.\")\n        elif option == \"K\":\n            a_0 = 1.12764514e-3\n            a_1 = 2.34282709e-4\n            a_2 = 8.77303013e-8\n            out = 1 / (a_0 + a_1 * numpy.log(raw_to_phy(sensor, device, list(raw_signal),\n                                                        resolution, option=\"Ohm\", truncate=False)) + a_2 *\n                       ((numpy.log(raw_to_phy(sensor, device, list(raw_signal), resolution,\n                                              option=\"Ohm\", truncate=False))) ** 3))\n        elif option == \"C\":\n            if device in available_dev_1:\n                out = 
numpy.array(raw_to_phy(sensor, device, list(raw_signal), resolution,\n option=\"K\", truncate=False)) - 273.15\n elif device in available_dev_2:\n out = ((raw_signal / (2 ** resolution)) * vcc - 0.5) * 100\n else:\n raise RuntimeError(\"The output specified unit does not have a defined transfer \"\n \"function for the used device.\")\n else:\n raise RuntimeError(\"The selected output unit is invalid for the sensor under analysis.\")\n\n elif sensor == \"EMG\":\n available_dev_1 = [\"bioplux\", \"bioplux_exp\", \"biosignalsplux\", \"rachimeter\", \"channeller\",\n \"swifter\", \"ddme_openbanplux\"]\n available_dev_2 = [\"bitalino\"]\n available_dev_3 = [\"bitalino_rev\", \"bitalino_riot\"]\n if option == \"mV\":\n if device in available_dev_1:\n vcc = 3.0\n offset = 0.5\n gain = 1\n elif device in available_dev_2:\n vcc = 3.3\n offset = 0.5\n gain = 1.008\n elif device in available_dev_3:\n vcc = 3.3\n offset = 0.5\n gain = 1.009\n else:\n raise RuntimeError(\"The output specified unit does not have a defined transfer \"\n \"function for the used device.\")\n out = (raw_signal * vcc / (2 ** resolution) - vcc * offset) / gain\n\n elif option == \"V\":\n out = numpy.array(raw_to_phy(sensor, device, list(raw_signal), resolution,\n option=\"mV\", truncate=False)) / 1000\n\n else:\n raise RuntimeError(\"The selected output unit is invalid for the sensor under analysis.\")\n\n elif sensor == \"ECG\":\n available_dev_1 = [\"bioplux\", \"bioplux_exp\", \"biosignalsplux\", \"rachimeter\", \"channeller\",\n \"swifter\", \"ddme_openbanplux\"]\n available_dev_2 = [\"bitalino\", \"bitalino_rev\", \"bitalino_riot\"]\n if option == \"mV\":\n if device in available_dev_1:\n vcc = 3.0\n offset = 0.5\n gain = 1.019\n elif device in available_dev_2:\n vcc = 3.3\n offset = 0.5\n gain = 1.1\n else:\n raise RuntimeError(\"The output specified unit does not have a defined transfer \"\n \"function for the used device.\")\n out = (raw_signal * vcc / (2 ** resolution) - vcc * offset) / gain\n\n elif option == \"V\":\n out = numpy.array(raw_to_phy(sensor, device, list(raw_signal), resolution,\n option=\"mV\", truncate=False)) / 1000\n\n else:\n raise RuntimeError(\"The selected output unit is invalid for the sensor under analysis.\")\n\n elif sensor == \"BVP\":\n available_dev_1 = [\"bioplux\", \"bioplux_exp\", \"biosignalsplux\", \"rachimeter\", \"channeller\",\n \"swifter\", \"ddme_openbanplux\"]\n if option == \"uA\":\n vcc = 3.0\n if device in available_dev_1:\n offset = 0\n gain = 0.190060606\n else:\n raise RuntimeError(\"The output specified unit does not have a defined transfer \"\n \"function for the used device.\")\n out = (raw_signal * vcc / (2 ** resolution) - vcc * offset) / gain\n\n elif option == \"A\":\n out = numpy.array(raw_to_phy(sensor, device, list(raw_signal), resolution,\n option=\"uA\", truncate=False)) * 1e-6\n\n else:\n raise RuntimeError(\"The selected output unit is invalid for the sensor under analysis.\")\n\n elif sensor in [\"SpO2.ARM\", \"SpO2.HEAD\", \"SpO2.FING\"]:\n available_dev_1 = [\"channeller\", \"biosignalsplux\", \"swifter\"]\n\n scale_factor = None\n if \"ARM\" in sensor or \"FING\" in sensor:\n scale_factor = 1.2\n elif \"HEAD\" in sensor:\n scale_factor = 0.15\n\n if option == \"uA\":\n if device in available_dev_1:\n out = scale_factor * (raw_signal / (2 ** resolution))\n else:\n raise RuntimeError(\"The output specified unit does not have a defined transfer \"\n \"function for the used device.\")\n\n elif option == \"A\":\n out = numpy.array(raw_to_phy(sensor, device, 
list(raw_signal), resolution,\n                                         option=\"uA\", truncate=False)) * 1e-6\n\n        else:\n            raise RuntimeError(\"The selected output unit is invalid for the sensor under analysis.\")\n\n    elif sensor == \"ACC\":\n        available_dev_1 = [\"bioplux\", \"bioplux_exp\", \"biosignalsplux\", \"rachimeter\", \"channeller\",\n                           \"swifter\", \"ddme_openbanplux\"]\n\n        available_dev_2 = [\"bitalino_rev\", \"bitalino_riot\"]\n        if option == \"g\":\n            if device in available_dev_1:\n                Cm = 28000.0\n                CM = 38000.0\n\n                out = 2.0 * ((2 ** (16.0 - resolution) * raw_signal - Cm) / (CM - Cm)) - 1.0\n\n            elif device in available_dev_2:\n\n                # for bitalino the default calibration values are\n                # for 6bit channel:\n                #     Cmax = 38\n                #     Cmin = 25\n                # for 10bit channel:\n                #     Cmax = 608\n                #     Cmin = 400\n                Cm = 400.0 / math.pow(2.0, 10 - resolution)\n                CM = 608.0 / math.pow(2.0, 10 - resolution)\n\n                out = 2.0 * ((raw_signal - Cm) / (CM - Cm)) - 1.0\n\n            else:\n                raise RuntimeError(\"The output specified unit does not have a defined transfer \"\n                                   \"function for the used device.\")\n        elif option == \"m/s^2\":\n\n            out = numpy.array(raw_to_phy(sensor, device, list(raw_signal), resolution, option=\"g\", truncate=False)) * g\n        else:\n            raise RuntimeError(\"The selected output unit is invalid for the sensor under analysis.\")\n\n    elif sensor == \"EEG\":\n        available_dev_1 = [\"bioplux\", \"bioplux_exp\", \"biosignalsplux\", \"rachimeter\", \"channeller\", \"swifter\",\n                           \"ddme_openbanplux\"]\n        available_dev_2 = [\"bitalino_rev\", \"bitalino_riot\"]\n        if option == \"uV\":\n            if device in available_dev_1:\n                vcc = 3.0\n                offset = 0.5\n                gain = 0.041990\n            elif device in available_dev_2:\n                vcc = 3.3\n                offset = 0.5\n                gain = 0.040000\n            else:\n                raise RuntimeError(\"The output specified unit does not have a defined transfer \"\n                                   \"function for the used device.\")\n            out = ((raw_signal * vcc / (2 ** resolution)) - vcc * offset) / gain\n\n        elif option == \"V\":\n            out = numpy.array(raw_to_phy(sensor, device, list(raw_signal), resolution,\n                                         option=\"uV\", truncate=False)) * 1e-6\n\n        else:\n            raise RuntimeError(\"The selected output unit is invalid for the sensor under analysis.\")\n\n    elif sensor == \"EDA\":\n        available_dev_1 = [\"bioplux\", \"bioplux_exp\", \"biosignalsplux\", \"rachimeter\", \"channeller\", \"swifter\",\n                           \"biosignalspluxsolo\"]\n        available_dev_2 = [\"bitalino\"]\n        available_dev_3 = [\"bitalino_rev\", \"bitalino_riot\"]\n        if option == \"uS\":\n            if device in available_dev_1:\n                vcc = 3.0\n                offset = 0\n                gain = 0.12\n            elif device in available_dev_2:\n                # out = 1.0 / (1.0 - (raw_signal / (2 ** resolution)))\n\n                return 1.0 / (1.0 - (raw_signal / (2 ** resolution)))  # [_truncate_value(value) for value in out]\n            elif device in available_dev_3:\n                vcc = 3.3\n                offset = 0\n                gain = 0.132\n            else:\n                raise RuntimeError(\"The output specified unit does not have a defined transfer \"\n                                   \"function for the used device.\")\n            out = ((raw_signal * vcc / (2 ** resolution)) - vcc * offset) / gain\n\n        elif option == \"S\":\n            out = numpy.array(raw_to_phy(sensor, device, list(raw_signal), resolution,\n                                         option=\"uS\", truncate=False)) * 1e-6\n\n        else:\n            raise RuntimeError(\"The selected output unit is invalid for the sensor under analysis.\")\n\n    else:\n        raise RuntimeError(\"The specified sensor is not valid or for now is not available for unit \"\n                           \"conversion.\")\n\n    # truncate the sensor data\n    # i.e. 
truncate 1.2081179279999996 to 1.2081179279\n if truncate:\n\n # get the precision needed (by calculating the least significant bit / the smallest step size achievable)\n precision = _calc_value_precision(device, resolution)\n\n out = numpy.array([_truncate_value(value, precision) for value in out])\n\n return out\n\n\ndef generate_time(signal, sample_rate=1000):\n \"\"\"\n -----\n Brief\n -----\n Function intended to generate a time axis of the input signal.\n\n -----------\n Description\n -----------\n The time axis generated by the acquisition process originates a set of consecutive values that represents the\n advancement of time, but does not have specific units.\n\n Once the acquisitions are made with specific sampling frequencies, it is possible to calculate the time instant\n of each sample by multiplying that value by the sampling frequency.\n\n The current function maps the values in the file produced by Opensignals to their real temporal values.\n\n ----------\n Parameters\n ----------\n signal : list\n List with the signal samples.\n\n sample_rate : int\n Sampling frequency of acquisition.\n\n Returns\n -------\n out : list\n Time axis with each list entry in seconds.\n \"\"\"\n\n # Download of signal if the input is a url.\n if _is_a_url(signal):\n # Check if it is a Google Drive sharable link.\n if \"drive.google\" in signal:\n signal = _generate_download_google_link(signal)\n data = load(signal, remote=True)\n key_level_1 = list(data.keys())[0]\n if \"00:\" in key_level_1:\n mac = key_level_1\n chn = list(data[mac].keys())[0]\n signal = data[mac][chn]\n else:\n chn = key_level_1\n signal = data[chn]\n\n nbr_of_samples = len(signal)\n end_of_time = nbr_of_samples / sample_rate\n\n # calculate the precision needed\n precision = _calc_time_precision(sample_rate)\n\n # ================================= Generation of the Time Axis ===============================\n time_axis = numpy.linspace(0, end_of_time, nbr_of_samples)\n\n time_axis = [_truncate_time(value, precision) for value in time_axis]\n\n return list(time_axis)\n\n\n# 25/09/2018 18h58m :)\n", "sub_path": "biosignalsnotebooks/biosignalsnotebooks/conversion.py", "file_name": "conversion.py", "file_ext": "py", "file_size_in_byte": 15141, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "numpy.array", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 231, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 258, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 268, "usage_type": "call"}, {"api_name": "scipy.constants.g", "line_number": 268, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 321, "usage_type": "call"}, {"api_name": "aux_functions._calc_value_precision", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.array", 
"line_number": 338, "usage_type": "call"}, {"api_name": "aux_functions._truncate_value", "line_number": 338, "usage_type": "call"}, {"api_name": "aux_functions._is_a_url", "line_number": 377, "usage_type": "call"}, {"api_name": "aux_functions._generate_download_google_link", "line_number": 380, "usage_type": "call"}, {"api_name": "load.load", "line_number": 381, "usage_type": "call"}, {"api_name": "aux_functions._calc_time_precision", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 398, "usage_type": "call"}, {"api_name": "aux_functions._truncate_time", "line_number": 400, "usage_type": "call"}]} {"seq_id": "273221635", "text": "import datetime\nimport re\nfrom functools import cmp_to_key\n\nfrom useful import input_from\n\n\ndef hour_and_min_from_hh_mm(hh_mm_string):\n \"\"\"\n '00:29' => (0, 29)\n \"\"\"\n hour, minute = (int(st) for st in hh_mm_string.split(':'))\n return hour, minute\n\n\ndef year_month_day_from(yyyy_mm_dd):\n return yyyy_mm_dd.split('-')\n\n\ndef extract_timestamp_values(record):\n \"\"\"\n Returns a record as a tuple of the timestamp and the event\n \"\"\"\n values = re.match(r'^(\\[.+\\]) (.+)$', record)\n date_and_min, event = values.group(1), values.group(2)\n return (date_and_min, event)\n\n\ndef date_and_min_from(timestamp):\n \"\"\"\n Strip square brackets and separate timestamp into date and time\n \"\"\"\n date, minute = timestamp[1:-1].split()\n return (date, minute)\n\n\ndef records_to_guard_shifts(records):\n \"\"\"\n Transforms a list of records to a dictionary of guard ID's with their minutes worked\n sorted by date and time in descending order\n \"\"\"\n\n def time_sorted_records(records):\n \"\"\"\n Sorts a list of records by time in ascending order (earliest to latest)\n \"\"\"\n\n def key(record):\n year, month, day = (int(st) for st in year_month_day_from(\n date_and_min_from(extract_timestamp_values(record)[0])[0]))\n hour, minute = hour_and_min_from_hh_mm(\n date_and_min_from(extract_timestamp_values(record)[0])[1])\n return datetime.datetime(\n year, month, day, hour=hour, minute=minute)\n\n return sorted(records, key=key)\n\n def guard_id_from(event):\n return event.split()[1]\n\n chronological_records = time_sorted_records(records)\n\n guard_ids_with_minutes_asleep = {}\n guard_on_shift = ''\n minute_last_awake = 0\n minute_woken = 0\n for record in chronological_records:\n date_and_min, event = extract_timestamp_values(record)\n minute = date_and_min_from(date_and_min)[1]\n if 'Guard' in event:\n guard_id = guard_id_from(event)\n if guard_id not in guard_ids_with_minutes_asleep:\n guard_ids_with_minutes_asleep.update({\n guard_id: {i: 0\n for i in range(60)}\n })\n guard_on_shift = guard_id\n if event == 'falls asleep':\n minute_last_awake = hour_and_min_from_hh_mm(minute)[1]\n if event == 'wakes up':\n minute_woken = hour_and_min_from_hh_mm(minute)[1]\n for minute in range(minute_last_awake, minute_woken):\n guard_ids_with_minutes_asleep[guard_on_shift][minute] += 1\n\n return guard_ids_with_minutes_asleep\n\n\ndef id_and_minute_of_most_asleep(records):\n guard_times = records_to_guard_shifts(records)\n\n most_asleep_guard = max(\n [(id_num, sum(minutes_asleep.values()))\n for (id_num, minutes_asleep) in guard_times.items()],\n key=lambda x: x[1])[0]\n minute_most_asleep = max(\n guard_times[most_asleep_guard].items(), key=lambda x: x[1])[0]\n\n return (most_asleep_guard, minute_most_asleep)\n\n\ndef id_and_minute_of_most_frequently_asleep_on_minute(records):\n guard_times = 
records_to_guard_shifts(records)\n    most_asleep_guard = max(guard_times.items(), key=lambda x: max(x[1].values()))[0]\n    minute_most_asleep = max(guard_times[most_asleep_guard].items(), key=lambda x: x[1])[0]\n    return (most_asleep_guard, minute_most_asleep)\n\n\n
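# Added illustration (hypothetical totals, not puzzle data): part 2 wants the guard with\n# the highest single-minute count, e.g.\n#   >>> guard_times = {'#10': {24: 2}, '#99': {45: 3}}\n#   >>> max(guard_times.items(), key=lambda x: max(x[1].values()))[0]\n#   '#99'\n\n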
"line_number": 44, "usage_type": "call"}]} {"seq_id": "296079041", "text": "from flask import Flask, request\nfrom Utils import BotManager, DB, Configs\nfrom datetime import datetime, timedelta\n\napp = Flask(__name__)\n\nbot = BotManager.BotManager()\n\n\n@app.route('/set_webhook')\ndef index():\n \"\"\"This will set the bot web-hook. Should be called once.\"\"\"\n\n bot.set_webhook()\n return \"done\"\n\n\n@app.route('/web_hook', methods=[\"POST\"])\ndef web_hook():\n \"\"\"Telegram will send new messages to this web-hook\"\"\"\n\n # Translate POST request data to JSON format.\n j = request.json\n update = BotManager.BotManager.json_to_update(j)\n\n if not update.message.text:\n return \"bad request\"\n\n # If the request is `start` command.\n if update.message.text.lower() in [\"/start\", \"/start@sample_bot\"]:\n chat_id = update.message.chat_id\n d = DB\n d.store(str(chat_id), datetime.now().date())\n bot.send_message(update.message.chat_id, Configs.Message.START_TEXT)\n\n # If the request is `help` command.\n if update.message.text.lower() in [\"/help\", \"/help@sample_bot\"]:\n bot.send_message(update.message.chat_id, Configs.Message.HELP_TEXT)\n\n # If the request is `switchreg` command.\n # `switchreg` command will switch client registeration.\n # Unregistered cients won't be notified of new rss entries anymore until they re-register.\n if update.message.text.lower() in [\"/switchreg\", \"/switchreg@sample_bot\"]:\n chat_id = update.message.chat_id\n d = DB\n d.switch_registration(str(chat_id))\n bot.send_message(update.message.chat_id, 'Your registeration switched succesfully')\n\n # The request was processed successfully.\n return \"ok\"\n\nif __name__ == '__main__':\n app.run()\n", "sub_path": "bot/Bot.py", "file_name": "Bot.py", "file_ext": "py", "file_size_in_byte": 1686, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "Utils.BotManager.BotManager", "line_number": 7, "usage_type": "call"}, {"api_name": "Utils.BotManager", "line_number": 7, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "Utils.BotManager.BotManager.json_to_update", "line_number": 24, "usage_type": "call"}, {"api_name": "Utils.BotManager.BotManager", "line_number": 24, "usage_type": "attribute"}, {"api_name": "Utils.BotManager", "line_number": 24, "usage_type": "name"}, {"api_name": "Utils.DB", "line_number": 32, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "name"}, {"api_name": "Utils.Configs.Message", "line_number": 34, "usage_type": "attribute"}, {"api_name": "Utils.Configs", "line_number": 34, "usage_type": "name"}, {"api_name": "Utils.Configs.Message", "line_number": 38, "usage_type": "attribute"}, {"api_name": "Utils.Configs", "line_number": 38, "usage_type": "name"}, {"api_name": "Utils.DB", "line_number": 45, "usage_type": "name"}]} {"seq_id": "226328405", "text": "__title__ = \"OCR POC\"\n__author__ = \"Brendan Lauck\"\n__maintainer__ = \"Brendan Lauck\"\n__email__ = \"v-brlauc@microsoft.com\"\n__status__ = \"development\"\n\nimport cv2 as cv\nimport main\nimport sys\nimport utils\n\n\nif __name__ == \"__main__\":\n \"\"\"PROCESS INPUT, LOAD IMAGE, AND GET TABLE FROM IMAGE\"\"\"\n if len(sys.argv) 
    if len(sys.argv) == 2:\n        imagePath = sys.argv[1]\n        if imagePath[-4:] == \".pdf\":\n            num_pages = utils.pdfToJpg(imagePath)\n            for i in range(num_pages):\n                img = cv.imread(\"out{}.jpg\".format(i + 1))\n                main.getData_1(img)\n        elif imagePath[-4:] == \".jpg\":\n            img = cv.imread(imagePath)\n            main.getData_1(img)\n        else:\n            print(\"Error: file must be in PDF or JPEG format\")\n    else:\n        print(\"Error: Usage: python get-table.py <image path>\")\n    \n", "sub_path": "get-table.py", "file_name": "get-table.py", "file_ext": "py", "file_size_in_byte": 842, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "sys.argv", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}, {"api_name": "utils.pdfToJpg", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 20, "usage_type": "call"}, {"api_name": "main.getData_1", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 23, "usage_type": "call"}, {"api_name": "main.getData_1", "line_number": 24, "usage_type": "call"}]} {"seq_id": "160079696", "text": "# Recurrent Neural Networks (RNN)\n\n#----------------------\n# Data Preprocessing\n#----------------------\n# Import the libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Import the training set\ndataset_train = pd.read_csv('Google_Stock_Price_Train.csv')\ntraining_set = dataset_train.iloc[:, 1:2].values  # Only want the open column but want it as a numpy array\n\n# Feature Scaling\nfrom sklearn.preprocessing import MinMaxScaler\nsc = MinMaxScaler()\ntraining_set_scaled = sc.fit_transform(training_set)\n\n# Create data structure with 60 timesteps and 1 output\nX_train = []  # Will contain the 60 previous timesteps (the inputs)\ny_train = []  # Will contain the outputs\nfor i in range(60, len(training_set_scaled)):\n    X_train.append(training_set_scaled[i-60:i, 0])  # 0 specifies the column index\n    y_train.append(training_set_scaled[i, 0])\n    \n# Convert X_train & y_train into numpy arrays\nX_train, y_train = np.array(X_train), np.array(y_train)\n\n# Reshape data structure into 3D to fit into RNN\n# RNN takes an input tuple as: (batchsize, timesteps, input_dim)\n# (X_train.shape[0] (1,198) = batch_size; X_train.shape[1] (60) = timesteps; 1 = input_dim)\nX_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))\n\n#-------------------\n# Building the RNN\n#-------------------\n# Import Keras Libraries\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Dropout\n\n# Initialize the RNN\nregressor = Sequential()\n\n# Adding the first LSTM layer and dropout\nregressor.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))\nregressor.add(Dropout(rate=0.2))\n\n# Adding the second LSTM layer and dropout\nregressor.add(LSTM(units=100, return_sequences=True))\nregressor.add(Dropout(rate=0.2))\n\n# Adding the third LSTM layer and dropout\nregressor.add(LSTM(units=100, return_sequences=True))\nregressor.add(Dropout(rate=0.2))\n\n# Adding the fourth LSTM layer and dropout\nregressor.add(LSTM(units=150))\nregressor.add(Dropout(rate=0.2))\n\n# Output Layer\nregressor.add(Dense(units=1))\n\n# Compiling the RNN\nregressor.compile(optimizer='adam', loss='mean_squared_error')\n\n# Fitting to the training set\nregressor.fit(X_train, y_train, batch_size=32, epochs=100)\n\n\n#------------------------\n# Making the predictions \n#------------------------\n# Get the real stock 
price of 2017\ndataset_test = pd.read_csv('Google_Stock_Price_Test.csv')\nreal_stock_price = dataset_test.iloc[:, 1:2].values\n\n# Concat both of the datasets\ndataset_total = pd.concat((dataset_train['Open'], dataset_test['Open']), axis=0)\n\n# Get the inputs - used to predict the accuracy against the real stock prices\ninputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values\n\n# Reshape the inputs into 1 column\ninputs = inputs.reshape(-1, 1)\n\n# Scale the inputs to match the training set\n# This has already been fitted to the training set so we only need to transform it\ninputs = sc.transform(inputs)\n\n# Create the data structure for the test set\nX_test = []\nfor i in range(60, len(inputs)):\n X_test.append(inputs[i-60:i, 0])\n \n# Convert to a numpy array\nX_test = np.array(X_test)\n \n# Reshape the data structure into a 3D shape\nX_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))\n\n# Get the prediction stock price of 2017\npredicted_stock_price = regressor.predict(X_test)\n\n# Convert predictions back to readable format - no longer feature scaled\npredicted_stock_price = sc.inverse_transform(predicted_stock_price)\n\n\n#------------------------\n# Visualise the results\n#------------------------\nplt.plot(real_stock_price, color='red', label='Real Google Stock Price')\nplt.plot(predicted_stock_price, color='blue', label='Predicted Google Stock Price')\nplt.title('Google Stock Price Prediction')\nplt.xlabel('Days')\nplt.ylabel('Google Stock Price')\nplt.legend()\nplt.show()", "sub_path": "deep-learning/rnn/rnn-keras.py", "file_name": "rnn-keras.py", "file_ext": "py", "file_size_in_byte": 3835, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 55, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 62, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 114, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}]} {"seq_id": "8545562", "text": "import os\nfrom torch.utils.data import DataLoader\nfrom data.dataset import CamVidDataset\nfrom data.transforms import Compose, Resize, ToTensor, Normalize\n\n\ndef build_dataset(cfg, split='train', transforms=None):\n data_dir = os.path.expanduser(cfg.DATASET_DIR)\n if cfg.DATASET == 'CamVid':\n dataset = CamVidDataset(data_dir, split, transforms)\n else:\n raise NotImplementedError('Not support dataset: {}'.format(cfg.DATASET))\n return dataset\n\n\ndef build_transforms(cfg):\n transforms = Compose([\n Resize(cfg.TRANSFORMS.RESIZE_SIZE),\n ToTensor(),\n Normalize(mean=cfg.TRANSFORMS.MEAN, std=cfg.TRANSFORMS.STD)\n ])\n return transforms\n\n\ndef build_data_loader(cfg, split='train'):\n num_workers = cfg.DATA_LOADER.NUM_WORKERS\n if split == 'train' or split == 'val':\n shuffle = True\n drop_last = True\n else:\n shuffle = False\n drop_last = False\n transforms = build_transforms(cfg)\n dataset = build_dataset(cfg, split, transforms)\n data_loader = DataLoader(dataset, batch_size=cfg.DATA_LOADER.BS, shuffle=shuffle,\n num_workers=num_workers, drop_last=drop_last)\n return data_loader\n", "sub_path": "data/build.py", "file_name": "build.py", "file_ext": "py", "file_size_in_byte": 1200, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "os.path.expanduser", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "data.dataset.CamVidDataset", "line_number": 10, "usage_type": "call"}, {"api_name": "data.transforms.Compose", "line_number": 17, "usage_type": "call"}, {"api_name": "data.transforms.Resize", "line_number": 18, "usage_type": "call"}, {"api_name": "data.transforms.ToTensor", "line_number": 19, "usage_type": "call"}, {"api_name": "data.transforms.Normalize", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 35, "usage_type": "call"}]} {"seq_id": "645567765", "text": "# -*- coding: utf-8 -*-\nimport logging\nimport os\nfrom mock import Mock, patch, MagicMock\nimport requests\nfrom django.conf import settings\nfrom django.test import TransactionTestCase\nfrom string import Template\nfrom ..wfs import WFSToGPKG\nfrom uuid import uuid4\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestWFSToGPKG(TransactionTestCase):\n\n def setUp(self, ):\n self.path = settings.ABS_PATH()\n self.task_process_patcher = patch('eventkit_cloud.utils.wfs.TaskProcess')\n self.task_process = self.task_process_patcher.start()\n self.addCleanup(self.task_process_patcher.stop)\n self.task_uid = uuid4()\n\n @patch('eventkit_cloud.utils.wfs.check_content_exists')\n @patch('eventkit_cloud.utils.wfs.os.path.exists')\n def test_create_convert(self, exists, check_content_exists):\n gpkg = 
'/path/to/sqlite.gpkg'\n bbox = [-45, -45, 45, 45]\n layer = 'awesomeLayer'\n name = 'Great export'\n service_url = 'http://my-service.org/some-server/wfs?'\n expected_url = '{}{}'.format(service_url.rstrip('?'), '?SERVICE=WFS&VERSION=1.0.0&REQUEST=GetFeature&TYPENAME={}&SRSNAME=EPSG:4326'.format(layer))\n cmd = Template(\"ogr2ogr -skipfailures -spat $minX $minY $maxX $maxY -f GPKG $gpkg WFS:'$url'\")\n cmd = cmd.safe_substitute({'gpkg': gpkg, 'url': expected_url, 'minX': bbox[0], 'minY': bbox[1], 'maxX': bbox[2], 'maxY': bbox[3]})\n exists.return_value = True\n check_content_exists.return_value = True\n self.task_process.return_value = Mock(exitcode=0)\n # set zipped to False for testing\n w2g = WFSToGPKG(gpkg=gpkg,\n bbox=bbox,\n service_url=service_url,\n layer=layer,\n debug=False,\n name=name,\n service_type=None,\n task_uid=self.task_uid)\n out = w2g.convert()\n self.task_process.assert_called_once_with(task_uid=self.task_uid)\n exists.assert_called_once_with(os.path.dirname(gpkg))\n self.task_process().start_process.assert_called_once_with(cmd, executable='/bin/sh', shell=True, stderr=-1,\n stdout=-1)\n self.assertEquals(out, gpkg)\n\n self.task_process.return_value = Mock(exitcode=1)\n with self.assertRaises(Exception):\n w2g.convert()\n\n\n", "sub_path": "eventkit_cloud/utils/tests/test_wfs.py", "file_name": "test_wfs.py", "file_ext": "py", "file_size_in_byte": 2439, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "django.test.TransactionTestCase", "line_number": 15, "usage_type": "name"}, {"api_name": "django.conf.settings.ABS_PATH", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 19, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 22, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 33, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 37, "usage_type": "call"}, {"api_name": "wfs.WFSToGPKG", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "mock.Mock", "line_number": 54, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 24, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 25, "usage_type": "call"}]} {"seq_id": "536909333", "text": "#!/usr/bin/env python\n#\n# GrovePi Example for using the Grove PIR Motion Sensor (http://www.seeedstudio.com/wiki/Grove_-_PIR_Motion_Sensor)\n#\n# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi\n#\n# Have a question about this example? 
Ask on the forums here: http://forum.dexterindustries.com/c/grovepi\n#\n'''\n## License\n\nThe MIT License (MIT)\n\nGrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.\nCopyright (C) 2017 Dexter Industries\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n'''\n# NOTE:\n# \tThere are also 2x potentiometers on the board for adjusting measuring distance and hold time\n# \tRotate the pot labelled \"Delay time\" clockwise to decrease the hold time (0.3s - 25s)\n# \tRotate the pot labelled \"Distance\" clockwise to decrease the measuring distance (10cm - 6m)\n\t\n# \tThere are multiple revisions of this board with different components for setting retriggerable/non-retriggerable.\n# \tRevision 1.0 contains a switch and revision 1.2 contains a jumper hat.\n# \tThe 1.0 switch board is labelled with H,L - H=retriggerable, L=non-retriggerable.\n# \tThe 1.2 jumper board has a pin diagram printed on the back.\n\t\n# \tretriggerable means the sensor will continue outputting high if motion was detected before the hold timer expires.\n# \tnon-retriggerable means the sensor will output high for the specified hold time only, then output low until motion is detected again.\n# \tif there is constant motion detected, retriggerable will stay high for the duration and non-retriggerable will oscillate between high/low.\n\nimport sys\nimport time\nimport datetime\nimport grovepi\nimport logging\nimport os\nimport argparse\n\n# Connect the Grove PIR Motion Sensor to digital port D8\n# NOTE: Some PIR sensors come with the SIG line connected to the yellow wire and some with the SIG line connected to the white wire.\n# If the example does not work on the first run, try changing the pin number\n# For example, for port D8, if pin 8 does not work below, change it to pin 7, since each port has 2 digital pins.\n# For port 4, this would be pin 3 and 4\n\ndef execute(mode):\n\ttry:\n\t\t# Sense motion, usually human, within the target range\n\t\tmotion = grovepi.pirRead(pir_sensor, options[mode][1])\n\t\tif motion==0 or motion==1:\t# check if reads were 0 or 1 it can be 255 also because of IO Errors so remove those values\n\t\t\tif motion==1:\n\t\t\t\tif args.led == 1:\n\t\t\t\t\tgrovepi.digitalWrite(led, 1)\n\t\t\t\tprint('Motion Detected', time.ctime())\n\t\t\t\tlogging.info('Motion Detected')\n\t\t\telse:\n\t\t\t\tgrovepi.digitalWrite(led, 0)\n\t\t\t\tprint('-')\n\texcept Exception as e:\n\t\tprint(\"Unexpected error:\", e)\n\t\tlogging.error('Unexpected error: %s', e)\n\t\traise\n\ndef 
getSetting():\n\twhile True:\n\t\tmode = input(\"\\nSelect a sensitivity level from [1 - 3]: \\n \\t 1. High \\n \\t 2. Medium \\n \\t 3. Low \\n\")\n\t\tif mode.isdigit():\n\t\t\tmode = int(mode)\n\t\t\tif (1 <= mode <= 3):\n\t\t\t\tprint(\"The program will run on {0} setting...\\n\".format(options[mode][0]))\n\t\t\t\treturn mode\n\t\t\telse:\n\t\t\t\tprint(\"'{0}' is an invalid number.\\n \\n\".format(mode))\n\t\telse:\n\t\t\tprint(\"'{0}' is not a number.\\n \\n\".format(mode))\n\ndef instant():\n\tmode = getSetting()\n\twhile True:\n\t\texecute(mode)\n\ndef setTimer():\n\tcurr = datetime.datetime.now()\n\tif args.timer:\n\t\tprint(\"Timer:\"+str(args.timer)+\" Minute(s)\")\n\t\ttime = args.timer\n\telse:\n\t\ttime = int(input(\"How long would you like the program to run (in minutes): \\n\"))\n\t\t\n\tif args.mode:\n\t\tmode = args.mode\t\t\n\t\tif (1 <= mode <= 3):\t\t\n\t\t\tprint(\"Mode:\"+str(args.mode))\n\t\telse:\n\t\t\tprint(\"'{0}' is an invalid number.\\n \\n\".format(mode))\n\t\t\tparser.print_help(sys.stderr)\n\t\t\tsys.exit(1)\n\telse:\n\t\tmode = getSetting()\n\n\tstop = curr + datetime.timedelta(minutes=time)\n\twhile datetime.datetime.now() < stop:\n\t\texecute(mode)\n\t\n\tprint(\"Program completed!\")\n\tlogging.info('Timer stopped. Exiting.')\n\ndef keyboardInterrupt():\n\tprint(\"\\nexiting due to keyboard interrupt\\n\")\n\tlogging.info('Exiting due to keyboard interrupt')\n\n\ndef main():\n\ttry:\n\t\tif args.timer:\n\t\t\tmodes[1][1]()\n\t\telse:\t\n\t\t\ttime = input(\"Do you want to set a timer? Y/n \\n\")\n\t\t\tif time.upper() == \"Y\":\n\t\t\t\tprint(\"The program will run with a timer...\\n\".format(modes[1][0]))\n\t\t\t\tmodes[1][1]()\n\t\t\telse:\n\t\t\t\tmodes[2][1]()\n\n\texcept KeyboardInterrupt:\n\t\tgrovepi.digitalWrite(led, 0)\n\t\tkeyboardInterrupt()\n\n\nif __name__ == \"__main__\":\n\n\tpir_sensor = 8\n\tled = 4\n\tmotion=0\n\tgrovepi.pinMode(pir_sensor,\"INPUT\")\n\tgrovepi.pinMode(led, \"OUTPUT\")\n\n\t#Get the path from which the script is executing\n\tscript = os.path.realpath(__file__)\n\tpath = os.path.dirname(script)\n\n\t#Configure the Python logger\n\tlogging.basicConfig(filename= path + '/grove_pir_motion_sensor/grove_pir_motion_sensor.log', \n\t\tformat='%(asctime)s - %(levelname)s - %(message)s',\n\t\tlevel=logging.INFO)\n\n\t# From https://stackoverflow.com/questions/40419139/argparse-add-example-usage\n\texample_text = '''example:\n\n\tpython3 grove_pir_motion_sensor_modified.py -m 1\n\tpython3 grove_pir_motion_sensor_modified.py -t 20 -m 1\n\tpython3 grove_pir_motion_sensor_modified.py -t 20 -m 1 -l 0'''\n\t# END\n\n\tparser = argparse.ArgumentParser(prog='PIR motion sensor', description='Configurable motion sensor',epilog=example_text,formatter_class=argparse.RawDescriptionHelpFormatter)\n\tparser.add_argument('-t', '--timer', nargs='?', type=int, metavar='timer', help=\"Set timer\")\n\tparser.add_argument('-m', '--mode', nargs='?', type=int, metavar='mode', help=\"Sensitivity level from [1 - 3]: \\n \\t 1. High \\n \\t 2. Medium \\n \\t 3. Low \\n\")\n\tparser.add_argument('-l', '--led', nargs='?', type=int, metavar='led', help=\"Set LED: \\n 0. Off \\n \\t 1. 
On \\n \")\n\targs = parser.parse_args()\n\n\tmodes = {1: [\"Set Timer\", setTimer], 2: [\"Instant\", instant]}\n\toptions = {1: [\"High\", .2], 2: [\"Medium\", 1.2], 3: [\"Low\", 2]}\n\n\tmain()\n\n", "sub_path": "Software/Python/grove_pir_motion_sensor_modified.py", "file_name": "grove_pir_motion_sensor_modified.py", "file_ext": "py", "file_size_in_byte": 6776, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "grovepi.pirRead", "line_number": 66, "usage_type": "call"}, {"api_name": "grovepi.digitalWrite", "line_number": 70, "usage_type": "call"}, {"api_name": "time.ctime", "line_number": 71, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 72, "usage_type": "call"}, {"api_name": "grovepi.digitalWrite", "line_number": 74, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 100, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 113, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 118, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 119, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 119, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 123, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 127, "usage_type": "call"}, {"api_name": "time.upper", "line_number": 136, "usage_type": "call"}, {"api_name": "grovepi.digitalWrite", "line_number": 143, "usage_type": "call"}, {"api_name": "grovepi.pinMode", "line_number": 152, "usage_type": "call"}, {"api_name": "grovepi.pinMode", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 160, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 162, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 172, "usage_type": "call"}, {"api_name": "argparse.RawDescriptionHelpFormatter", "line_number": 172, "usage_type": "attribute"}]} {"seq_id": "651334090", "text": "# coding: utf-8\n\n\"\"\"\n Carbon DLS API\n\n Welcome to the Carbon DLS API docs! 
You can find all relevant documentation here: https://github.com/carbon3d/carbon3d-api # noqa: E501\n\n The version of the OpenAPI document: 0.0.8\n Contact: api-list@carbon3d.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\nimport re # noqa: F401\n\n# python 2 and python 3 compatibility library\nimport six\n\nfrom carbon3d.api_client import ApiClient\nfrom carbon3d.exceptions import ( # noqa: F401\n ApiTypeError,\n ApiValueError\n)\n\n\nclass PrintsApi(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def get_prints(self, limit, offset, **kwargs): # noqa: E501\n \"\"\"List finished prints information # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_prints(limit, offset, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param int limit: Max records to return (required)\n :param int offset: Number of items to skip (required)\n :param str platform_serial: Platform used for print\n :param str printer_serial: Serial of the Printer used to print\n :param str printer_name: Name of the Printer used to print\n :param str print_order_uuid: UUID of the print_order submitted\n :param str print_order_number: Number of the print_order submitted\n :param str started_before: Print started before timestamp (inclusive)\n :param str started_after: Print started after timestamp (inclusive)\n :param str finished_before: Print finished before timestamp (inclusive)\n :param str finished_after: Print finished after timestamp (inclusive)\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: PrintsResponse\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n return self.get_prints_with_http_info(limit, offset, **kwargs) # noqa: E501\n\n def get_prints_with_http_info(self, limit, offset, **kwargs): # noqa: E501\n \"\"\"List finished prints information # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_prints_with_http_info(limit, offset, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param int limit: Max records to return (required)\n :param int offset: Number of items to skip (required)\n :param str platform_serial: Platform used for print\n :param str printer_serial: Serial of the Printer used to print\n :param str printer_name: Name of the Printer used to print\n :param str print_order_uuid: UUID of the print_order submitted\n :param str print_order_number: Number of the print_order submitted\n :param str started_before: Print started before timestamp (inclusive)\n :param str started_after: Print started after timestamp (inclusive)\n :param str finished_before: Print finished before timestamp (inclusive)\n :param str finished_after: Print finished after timestamp (inclusive)\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(PrintsResponse, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n local_var_params = locals()\n\n all_params = [\n 'limit',\n 'offset',\n 'platform_serial',\n 'printer_serial',\n 'printer_name',\n 'print_order_uuid',\n 'print_order_number',\n 'started_before',\n 'started_after',\n 'finished_before',\n 'finished_after'\n ]\n all_params.extend(\n [\n 'async_req',\n '_return_http_data_only',\n '_preload_content',\n '_request_timeout'\n ]\n )\n\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_prints\" % key\n )\n local_var_params[key] = val\n del local_var_params['kwargs']\n # verify the required parameter 'limit' is set\n if self.api_client.client_side_validation and ('limit' not in local_var_params or # noqa: E501\n local_var_params['limit'] is None): # noqa: E501\n raise ApiValueError(\"Missing the required parameter `limit` when calling `get_prints`\") # noqa: E501\n # verify the required parameter 'offset' is set\n if self.api_client.client_side_validation and ('offset' not in local_var_params or # noqa: E501\n local_var_params['offset'] is None): # noqa: E501\n raise ApiValueError(\"Missing the required parameter `offset` when calling `get_prints`\") # noqa: E501\n\n if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 1000: # noqa: E501\n raise ApiValueError(\"Invalid value for parameter `limit` when calling `get_prints`, must be a value less than or equal to `1000`\") # noqa: E501\n if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1: # noqa: E501\n raise ApiValueError(\"Invalid value for parameter `limit` when calling `get_prints`, must be a value greater than or equal to `1`\") # noqa: E501\n if self.api_client.client_side_validation and 'offset' in local_var_params and local_var_params['offset'] < 0: # noqa: E501\n raise ApiValueError(\"Invalid value for parameter `offset` when calling `get_prints`, must be a value greater 
than or equal to `0`\") # noqa: E501\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'platform_serial' in local_var_params and local_var_params['platform_serial'] is not None: # noqa: E501\n query_params.append(('platform_serial', local_var_params['platform_serial'])) # noqa: E501\n if 'printer_serial' in local_var_params and local_var_params['printer_serial'] is not None: # noqa: E501\n query_params.append(('printer_serial', local_var_params['printer_serial'])) # noqa: E501\n if 'printer_name' in local_var_params and local_var_params['printer_name'] is not None: # noqa: E501\n query_params.append(('printer_name', local_var_params['printer_name'])) # noqa: E501\n if 'print_order_uuid' in local_var_params and local_var_params['print_order_uuid'] is not None: # noqa: E501\n query_params.append(('print_order_uuid', local_var_params['print_order_uuid'])) # noqa: E501\n if 'print_order_number' in local_var_params and local_var_params['print_order_number'] is not None: # noqa: E501\n query_params.append(('print_order_number', local_var_params['print_order_number'])) # noqa: E501\n if 'started_before' in local_var_params and local_var_params['started_before'] is not None: # noqa: E501\n query_params.append(('started_before', local_var_params['started_before'])) # noqa: E501\n if 'started_after' in local_var_params and local_var_params['started_after'] is not None: # noqa: E501\n query_params.append(('started_after', local_var_params['started_after'])) # noqa: E501\n if 'finished_before' in local_var_params and local_var_params['finished_before'] is not None: # noqa: E501\n query_params.append(('finished_before', local_var_params['finished_before'])) # noqa: E501\n if 'finished_after' in local_var_params and local_var_params['finished_after'] is not None: # noqa: E501\n query_params.append(('finished_after', local_var_params['finished_after'])) # noqa: E501\n if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501\n query_params.append(('limit', local_var_params['limit'])) # noqa: E501\n if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501\n query_params.append(('offset', local_var_params['offset'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/prints', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PrintsResponse', # noqa: E501\n auth_settings=auth_settings,\n async_req=local_var_params.get('async_req'),\n _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501\n _preload_content=local_var_params.get('_preload_content', True),\n _request_timeout=local_var_params.get('_request_timeout'),\n collection_formats=collection_formats)\n", "sub_path": "v1/python/carbon3d/api/prints_api.py", "file_name": "prints_api.py", "file_ext": "py", "file_size_in_byte": 10984, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "carbon3d.api_client.ApiClient", "line_number": 37, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 132, "usage_type": "call"}, {"api_name": "carbon3d.exceptions.ApiTypeError", "line_number": 
134, "usage_type": "call"}, {"api_name": "carbon3d.exceptions.ApiValueError", "line_number": 143, "usage_type": "call"}, {"api_name": "carbon3d.exceptions.ApiValueError", "line_number": 147, "usage_type": "call"}, {"api_name": "carbon3d.exceptions.ApiValueError", "line_number": 150, "usage_type": "call"}, {"api_name": "carbon3d.exceptions.ApiValueError", "line_number": 152, "usage_type": "call"}, {"api_name": "carbon3d.exceptions.ApiValueError", "line_number": 154, "usage_type": "call"}]} {"seq_id": "322969893", "text": "import os\nimport logging\nimport subprocess\nimport pymysql\nimport errno\nimport importlib\n\nfrom airone.lib.log import Logger\n\npymysql.install_as_MySQLdb()\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '(ch@ngeMe)'\n\n# Celery settings\nCELERY_BROKER_URL = 'amqp://guest:guest@localhost//'\n\n#: Only add pickle to this list if your broker is secured\n#: from unwanted access (see userguide/security.html)\nCELERY_ACCEPT_CONTENT = ['json']\nCELERY_RESULT_BACKEND = 'rpc://'\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_BROKER_HEARTBEAT = 0\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = ['*']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'common',\n 'user',\n 'group',\n 'entity',\n 'acl',\n 'dashboard',\n 'entry',\n 'job',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'import_export',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'custom_view.background',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'airone.urls'\n\nPROJECT_PATH = os.path.realpath(os.path.dirname(__file__))\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n PROJECT_PATH + '/../templates/',\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'airone.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'airone',\n 'USER': 'airone',\n 'PASSWORD': 'password',\n 'HOST': 'localhost',\n 'OPTIONS': {\n 'charset': 'utf8mb4',\n }\n }\n}\nDATABASE_ROUTERS = []\n\n# Password validation\n# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 
'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Asia/Tokyo'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.11/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = [\n PROJECT_PATH + '/../static/'\n]\n\nLOGIN_REDIRECT_URL = '/dashboard/'\n\n# global settins for AirOne\nAIRONE = {\n 'ENABLE_PROFILE': True,\n 'CONCURRENCY': 1,\n 'VERSION': 'unknown',\n 'FILE_STORE_PATH': '/tmp/airone_app',\n 'AUTO_COMPLEMENT_USER': 'auto_complementer',\n 'DB_SLAVES': ['default'],\n 'DB_MASTER': 'default',\n 'EXTENSIONS': [],\n}\n\n# load extension settings individually\nfor extension in AIRONE['EXTENSIONS']:\n try:\n importlib.import_module('%s.settings' % extension)\n except ImportError:\n Logger.warning('Failed to load settings %s' % extension)\n\ntry:\n proc = subprocess.Popen(\"cd %s && git describe --tags\" % BASE_DIR, shell=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n outs, errs = proc.communicate(timeout=1)\n # if `git describe --tags` prints some string to stdout, use the result as version\n # else use 'unknown' as version (e.g. untagged git repository)\n if outs != b'':\n AIRONE['VERSION'] = outs.strip()\n else:\n logging.getLogger(__name__).warning('could not describe airone version from git')\n\n # create a directory to store temporary file for applications\n if not os.path.exists(AIRONE['FILE_STORE_PATH']):\n os.makedirs(AIRONE['FILE_STORE_PATH'])\n\nexcept OSError as e:\n # errno.ENOENT is the errno of FileNotFoundError\n if e.errno == errno.ENOENT:\n # do nothing and use 'unknown' as version when git does not exists\n logging.getLogger(__name__).warning('git command not found.')\n\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': 'localhost:11211',\n 'TIMEOUT': None,\n }\n}\n\nES_CONFIG = {\n 'NODES': ['localhost:9200'],\n 'INDEX': 'airone',\n 'MAXIMUM_RESULTS_NUM': 500000,\n 'TIMEOUT': None\n}\n\n#\n# Note: Disable LDAP authentication by default in the mean time.\n#\n# AUTHENTICATION_BACKENDS = (\n# 'airone.auth.ldap.LDAPBackend',\n# )\n\nAUTH_CONFIG = {\n 'LDAP': {\n 'SERVER_ADDRESS': 'localhost',\n 'USER_FILTER': 'sn={username},ou=User,dc=example,dc=com'\n }\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'all': {\n 'format': '\\t'.join([\n \"[%(levelname)s]\",\n \"asctime:%(asctime)s\",\n \"module:%(module)s\",\n \"message:%(message)s\",\n \"process:%(process)d\",\n \"thread:%(thread)d\",\n ])\n }\n },\n 'handlers': {\n 'file': {\n 'level': 'INFO',\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(BASE_DIR, 'logs/django.log'),\n 'formatter': 'all'\n },\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'all'\n },\n },\n 'loggers': {\n 'airone': {\n 'handlers': ['file', 'console'],\n 'level': 'INFO',\n 'propagate': False,\n },\n 'django.server': {\n 'handlers': ['file', 'console'],\n 'level': 'INFO',\n 'propagate': False,\n }\n }\n}\n# If log dir is not exists create it.\nif not os.path.exists(os.path.dirname(LOGGING['handlers']['file']['filename'])):\n os.makedirs(os.path.dirname(LOGGING['handlers']['file']['filename']))\n", "sub_path": "airone/settings.py", "file_name": "settings.py", 
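# [Editor's note] The settings module above derives AIRONE['VERSION'] from
# `git describe --tags`; here is that idiom isolated as a standalone sketch, with the
# same fallbacks (untagged repository -> default, missing git binary -> OSError/ENOENT).
import errno
import subprocess

def detect_version(base_dir, default='unknown'):
    try:
        proc = subprocess.Popen('cd %s && git describe --tags' % base_dir,
                                shell=True,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        outs, errs = proc.communicate(timeout=1)
        # a tagged repo prints the tag to stdout; anything else keeps the default
        return outs.strip() if outs else default
    except OSError as e:
        if e.errno == errno.ENOENT:  # git not installed
            return default
        raise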
"file_ext": "py", "file_size_in_byte": 7087, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pymysql.install_as_MySQLdb", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 73, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 170, "usage_type": "call"}, {"api_name": "airone.lib.log.Logger.warning", "line_number": 172, "usage_type": "call"}, {"api_name": "airone.lib.log.Logger", "line_number": 172, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 175, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 176, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path", "line_number": 186, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 187, "usage_type": "call"}, {"api_name": "errno.ENOENT", "line_number": 191, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 244, "usage_type": "call"}, {"api_name": "os.path", "line_number": 244, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path", "line_number": 267, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 267, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 268, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 268, "usage_type": "call"}, {"api_name": "os.path", "line_number": 268, "usage_type": "attribute"}]} {"seq_id": "595576815", "text": "from config.db_config import Session\nfrom clases.Cliente import Cliente\nfrom clases.Alquiler import Alquiler\nfrom clases.Inmueble import Inmueble\nfrom sqlalchemy import exc\n\nsession = Session()\n\n\ndef insert_in_db(objeto):\n session.add(objeto)\n session.commit()\n\n\ndef delete_from_db(objeto):\n session.delete(objeto)\n session.commit()\n\n\ndef update():\n session.commit()\n\n\ndef cliente_existe(cliente):\n existe = False\n if cliente is not None:\n if session.query(Cliente).filter(Cliente.dni == cliente.dni).count() == 1:\n existe = True\n return existe\n\n\ndef get_cliente():\n dni = int(input(\"DNI: \"))\n try:\n return session.query(Cliente).filter(Cliente.dni == dni).one()\n except exc.SQLAlchemyError:\n print(\"No existe un cliente de DNI \" + str(dni))\n return None\n\n\ndef get_inmueble(inmueble_id):\n return session.query(Inmueble).filter(Inmueble.inmuebleId == inmueble_id).one()\n\n\ndef get_alquiler(alquiler_id):\n return session.query(Alquiler).filter(Alquiler.alquilerId == alquiler_id).one()\n\n\ndef listar_clientes(filtro):\n lista_clientes = []\n if filtro == 1: # Filtrar por DNI\n cliente = get_cliente()\n if cliente_existe(cliente):\n lista_clientes.append(cliente)\n elif filtro == 2:\n apellido = str(input(\"Apellido: \"))\n lista_clientes = session.query(Cliente).filter(Cliente.apellido.ilike('%'+apellido+'%')).all()\n\n elif filtro == 3:\n 
lista_clientes = session.query(Cliente).all()\n\n return lista_clientes\n\n\ndef listar_inmuebles(cliente=0):\n if cliente != 0:\n return session.query(Inmueble).filter(Inmueble.alquilado == 0, Inmueble.propietario == cliente).all()\n else:\n return session.query(Inmueble).filter(Inmueble.alquilado == 0).order_by(Inmueble.inmuebleId).all()\n\n\ndef listar_alquileres(cliente, args):\n alquileres = []\n\n if args == 1 and cliente_existe(cliente): # Filtrar por dueรฑo\n alquileres = session.query(Alquiler).join(Inmueble).join(Cliente).filter(Cliente.dni == cliente.dni).all()\n\n elif args == 2 and cliente_existe(cliente): # Filtrar por inquilino\n alquileres = session.query(Alquiler).join(Cliente).filter(Cliente.dni == cliente.dni).all()\n\n elif args == 3:\n alquileres = session.query(Alquiler).order_by(Alquiler.fechainicio).all()\n\n return alquileres\n\n\ndef contar_alquileres(dni):\n\n return session.query(Alquiler).join(Cliente).filter(Cliente.dni == dni).count()\n\n\ndef contar_propiedades(cliente):\n return session.query(Inmueble).filter(Inmueble.propietario == cliente).count()\n", "sub_path": "utils/db_functions.py", "file_name": "db_functions.py", "file_ext": "py", "file_size_in_byte": 2603, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "config.db_config.Session", "line_number": 7, "usage_type": "call"}, {"api_name": "clases.Cliente.Cliente", "line_number": 27, "usage_type": "argument"}, {"api_name": "clases.Cliente.Cliente.dni", "line_number": 27, "usage_type": "attribute"}, {"api_name": "clases.Cliente.Cliente", "line_number": 35, "usage_type": "argument"}, {"api_name": "clases.Cliente.Cliente.dni", "line_number": 35, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 36, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 36, "usage_type": "name"}, {"api_name": "clases.Inmueble.Inmueble", "line_number": 42, "usage_type": "argument"}, {"api_name": "clases.Inmueble.Inmueble.inmuebleId", "line_number": 42, "usage_type": "attribute"}, {"api_name": "clases.Alquiler.Alquiler", "line_number": 46, "usage_type": "argument"}, {"api_name": "clases.Alquiler.Alquiler.alquilerId", "line_number": 46, "usage_type": "attribute"}, {"api_name": "clases.Cliente.Cliente", "line_number": 57, "usage_type": "argument"}, {"api_name": "clases.Cliente.Cliente.apellido.ilike", "line_number": 57, "usage_type": "call"}, {"api_name": "clases.Cliente.Cliente.apellido", "line_number": 57, "usage_type": "attribute"}, {"api_name": "clases.Cliente.Cliente", "line_number": 60, "usage_type": "argument"}, {"api_name": "clases.Inmueble.Inmueble", "line_number": 67, "usage_type": "argument"}, {"api_name": "clases.Inmueble.Inmueble.alquilado", "line_number": 67, "usage_type": "attribute"}, {"api_name": "clases.Inmueble.Inmueble.propietario", "line_number": 67, "usage_type": "attribute"}, {"api_name": "clases.Inmueble.Inmueble", "line_number": 69, "usage_type": "argument"}, {"api_name": "clases.Inmueble.Inmueble.alquilado", "line_number": 69, "usage_type": "attribute"}, {"api_name": "clases.Inmueble.Inmueble.inmuebleId", "line_number": 69, "usage_type": "attribute"}, {"api_name": "clases.Cliente.Cliente", "line_number": 76, "usage_type": "argument"}, {"api_name": "clases.Inmueble.Inmueble", "line_number": 76, "usage_type": "argument"}, {"api_name": "clases.Alquiler.Alquiler", "line_number": 76, "usage_type": "argument"}, {"api_name": "clases.Cliente.Cliente.dni", "line_number": 76, 
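# [Editor's note] A short, hypothetical driver for the session helpers defined in
# db_functions above. The Cliente field names (dni, apellido) are inferred from the
# queries; the constructor signature and the import path are assumptions about the
# project layout, not confirmed by the record.
from clases.Cliente import Cliente
from utils import db_functions as db

cliente = Cliente(dni=12345678, apellido='Perez')  # hypothetical constructor
db.insert_in_db(cliente)         # session.add() + commit()

if db.cliente_existe(cliente):
    cliente.apellido = 'Perez Gomez'
    db.update()                  # commits pending changes on the shared session

for c in db.listar_clientes(3):  # filtro 3 lists every client
    print(c.dni, c.apellido)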
"usage_type": "attribute"}, {"api_name": "clases.Cliente.Cliente", "line_number": 79, "usage_type": "argument"}, {"api_name": "clases.Alquiler.Alquiler", "line_number": 79, "usage_type": "argument"}, {"api_name": "clases.Cliente.Cliente.dni", "line_number": 79, "usage_type": "attribute"}, {"api_name": "clases.Alquiler.Alquiler", "line_number": 82, "usage_type": "argument"}, {"api_name": "clases.Alquiler.Alquiler.fechainicio", "line_number": 82, "usage_type": "attribute"}, {"api_name": "clases.Cliente.Cliente", "line_number": 89, "usage_type": "argument"}, {"api_name": "clases.Alquiler.Alquiler", "line_number": 89, "usage_type": "argument"}, {"api_name": "clases.Cliente.Cliente.dni", "line_number": 89, "usage_type": "attribute"}, {"api_name": "clases.Inmueble.Inmueble", "line_number": 93, "usage_type": "argument"}, {"api_name": "clases.Inmueble.Inmueble.propietario", "line_number": 93, "usage_type": "attribute"}]} {"seq_id": "59640995", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\n@author: zhaogao\n@license: (C) Copyright 2013-2018.\n@contact: 449628536@qq.com\n@software: learn-py\n@file: len147_ๅฏ่‡ชๅฎšไน‰ๅฑžๆ€ง็š„่ฃ…้ฅฐๅ™จ.py\n@time: 10/04/2018 11:24 AM\n'''\n\n# ไฝ ๆƒณๅ†™ไธ€ไธช่ฃ…้ฅฐๅ™จๆฅๅŒ…่ฃ…ไธ€ไธชๅ‡ฝๆ•ฐ๏ผŒๅนถไธ”ๅ…่ฎธ็”จๆˆทๆไพ›ๅ‚ๆ•ฐๅœจ่ฟ่กŒๆ—ถๆŽงๅˆถ่ฃ…้ฅฐๅ™จ่กŒไธบ\n# ๅผ•ๅ…ฅไธ€ไธช่ฎฟ้—ฎๅ‡ฝๆ•ฐ๏ผŒไฝฟ็”จ nolocal ๆฅไฟฎๆ”นๅ†…้ƒจๅ˜้‡ใ€‚็„ถๅŽ่ฟ™ไธช่ฎฟ้—ฎๅ‡ฝๆ•ฐ่ขซไฝœไธบไธ€ไธชๅฑžๆ€ง่ต‹ๅ€ผ็ป™ๅŒ…่ฃ…ๅ‡ฝๆ•ฐ\n\nfrom functools import wraps, partial\nimport logging\n\n\n# ๅฎž็”จ่ฃ…้ฅฐๅ™จๅฐ†ๅ‡ฝๆ•ฐ้™„ๅŠ ไธบobj็š„ๅฑžๆ€ง\ndef attach_wrapper(obj, func=None):\n if func is None:\n return partial(attach_wrapper, obj)\n setattr(obj, func.__name__, func)\n return func\n\n\ndef logged(level, name=None, message=None):\n '''\n ๅฐ†่ฎฐๅฝ•ๆทปๅŠ ๅˆฐๅŠŸ่ƒฝใ€‚ ๅฆ‚ๆžœๆฒกๆœ‰ๆŒ‡ๅฎšๅ็งฐๅ’Œๆถˆๆฏ๏ผŒๅˆ™้ป˜่ฎคไธบ่ฏฅๅ‡ฝๆ•ฐ็š„ๆจกๅ—ๅ’Œๅ็งฐใ€‚\n :param level:ๆ—ฅๅฟ—่ฎฐๅฝ•็บงๅˆซ\n :param name:่ฎฐๅฝ•ๅ™จๅ็งฐ\n :param message:ๆ—ฅๅฟ—ๆถˆๆฏ\n :return:\n '''\n\n def decorate(func):\n logname = name if name else func.__module__\n log = logging.getLogger(logname)\n logmsg = message if message else func.__name__\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n log.log(level, logmsg)\n return func(*args, **kwargs)\n\n # ๆทปๅŠ  setter ๆ–นๆณ•\n @attach_wrapper(wrapper)\n def set_level(newlevel):\n nonlocal level\n level = newlevel\n\n @attach_wrapper(wrapper)\n def set_message(newmsg):\n # ่ฎฟ้—ฎๅ‡ฝๆ•ฐๅ…่ฎธไฝฟ็”จ nonlocal ๆฅไฟฎๆ”นๅ‡ฝๆ•ฐๅ†…้ƒจ็š„ๅ˜้‡\n nonlocal logmsg\n logmsg = newmsg\n\n @attach_wrapper(wrapper)\n def get_level():\n return level\n\n return wrapper\n\n return decorate\n\n\n# ็คบไพ‹ไฝฟ็”จ\n@logged(logging.DEBUG)\ndef add(x, y):\n return x + y\n\n\n@logged(logging.CRITICAL, 'example')\ndef spam():\n print('spam')\n\n\n# ไธ‹้ขๆ˜ฏไบคไบ’็Žฏๅขƒไธ‹็š„ไฝฟ็”จไพ‹ๅญ:\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\nadd(2, 3)\n\n# ๆ”นๅ˜ log ไฟกๆฏ\nadd.set_message('add called')\nadd(2, 3)\n\n# ๆ”นๅ˜ log ็ญ‰็บง\nadd.set_level(logging.WARNING)\nadd(2, 3)\n\n# ่ฟ˜ๆœ‰ไธ€ไธชไปคไบบๅƒๆƒŠ็š„ๅœฐๆ–นๆ˜ฏ่ฎฟ้—ฎๅ‡ฝๆ•ฐไผšๅœจๅคšๅฑ‚่ฃ…้ฅฐๅ™จ้—ดไผ ๆ’ญ (ๅฆ‚ๆžœไฝ ็š„่ฃ…้ฅฐๅ™จ้ƒฝไฝฟ ็”จไบ† @functools.wraps ๆณจ่งฃ)ใ€‚ไพ‹ๅฆ‚๏ผŒๅ‡่ฎพไฝ ๅผ•ๅ…ฅๅฆๅค–ไธ€ไธช่ฃ…้ฅฐๅ™จ\nimport time\n\n\ndef timethis(func):\n '''\n ๆŠฅๅ‘Šๆ‰ง่กŒๆ—ถ้—ด็š„่ฃ…้ฅฐๅ™จ\n :param func:\n :return:\n '''\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n print(func.__name__, end - start)\n return result\n\n return wrapper\n\n\n@timethis\n@logged(logging.DEBUG)\ndef 
countdown(n):\n while n > 0:\n n -= 1\n\n\n# ไฝ ไผšๅ‘็Žฐ่ฎฟ้—ฎๅ‡ฝๆ•ฐไพๆ—งๆœ‰ๆ•ˆ:\n\ncountdown(10000000)\ncountdown.set_level(logging.WARNING)\ncountdown.set_message('counting down to zero')\ncountdown(10000000)\n\n", "sub_path": "cook/len147_ๅฏ่‡ชๅฎšไน‰ๅฑžๆ€ง็š„่ฃ…้ฅฐๅ™จ.py", "file_name": "len147_ๅฏ่‡ชๅฎšไน‰ๅฑžๆ€ง็š„่ฃ…้ฅฐๅ™จ.py", "file_ext": "py", "file_size_in_byte": 2981, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "functools.partial", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 38, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 41, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 68, "usage_type": "attribute"}, {"api_name": "logging.CRITICAL", "line_number": 73, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 81, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 81, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 89, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 105, "usage_type": "call"}, {"api_name": "time.time", "line_number": 107, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 103, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 115, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 124, "usage_type": "attribute"}]} {"seq_id": "568987974", "text": "import gocept.form.action\nimport xml.sax.saxutils\nimport zeit.cms.browser.form\nimport zeit.cms.content.browser.form\nimport zeit.cms.content.interfaces\nimport zeit.cms.interfaces\nimport zeit.content.quiz.interfaces\nimport zeit.content.quiz.quiz\nimport zope.cachedescriptors.property\nimport zope.formlib.form\nimport zope.traversing.browser.interfaces\nfrom zeit.content.quiz.i18n import MessageFactory as _\n\n\nclass Questions(object):\n\n title = _('Quiz overview')\n\n @zope.cachedescriptors.property.Lazy\n def metadata(self):\n return zeit.cms.content.interfaces.ICommonMetadata(self.context)\n\n def update(self):\n super(Questions, self).update()\n if 'apply' not in self.request.form:\n return\n questions = self.request.form.get('__quiz__')\n self.context.updateOrder(questions)\n for q_name in questions:\n answers = self.request.form.get(q_name, ())\n self.context[q_name].updateOrder(answers)\n\n\nclass QuizFormBase(object):\n\n form_fields = (\n zope.formlib.form.FormFields(\n zeit.cms.interfaces.ICMSContent,\n zeit.cms.content.interfaces.ICommonMetadata,\n ).omit('commentsAllowed') +\n zope.formlib.form.FormFields(\n zeit.content.quiz.interfaces.IQuiz).select('commentsAllowed'))\n\n\nclass AddQuiz(QuizFormBase,\n zeit.cms.content.browser.form.CommonMetadataAddForm):\n\n title = _(\"Add quiz\")\n factory = zeit.content.quiz.quiz.Quiz\n next_view = 'edit.html'\n\n\nclass EditQuiz(QuizFormBase,\n zeit.cms.content.browser.form.CommonMetadataEditForm):\n\n title = _(\"Edit quiz\")\n\n\nclass DisplayQuiz(QuizFormBase,\n zeit.cms.content.browser.form.CommonMetadataDisplayForm):\n\n title = _(\"View quiz\")\n\n\n# quiz content\n\n\nclass EditFormBase(zeit.cms.browser.form.EditForm):\n \"\"\"Base class for edit views of various sub-objects of a quiz.\n \"\"\"\n\n deleted = False\n\n @zope.formlib.form.action(\n _('Apply'), condition=zope.formlib.form.haveInputWidgets)\n def handle_edit_action(self, action, data):\n \"\"\"Mandatory overwrite to keep the action.\n\n When overwriting one action, we must 
overwrite *all* actions, since\n they are only stored locally and no super call is made to retrieve\n other actions. Thus, when we overwrite \"delete\", we must also overwrite\n \"apply\", otherwise this action will be forgotten.\n\n \"\"\"\n super(EditFormBase, self).handle_edit_action.success(data)\n\n @gocept.form.action.confirm(\n _('Delete'),\n name='delete',\n confirm_message=_('delete-item-confirmation',\n default=u'Really delete?'),\n condition=zope.formlib.form.haveInputWidgets,\n )\n def handle_delete(self, action, data):\n self.quiz = zeit.content.quiz.interfaces.IQuiz(self.context)\n parent = self.context.__parent__\n del parent[self.context.__name__]\n self.status = _('Item was deleted.')\n self.deleted = True\n\n def nextURL(self):\n if self.deleted:\n return self.url(self.quiz, '@@questions.html')\n return super(EditFormBase, self).nextURL()\n\n\n@zope.component.adapter(zeit.content.quiz.interfaces.IQuestion,\n zeit.cms.browser.interfaces.ICMSLayer)\n@zope.interface.implementer(zope.publisher.interfaces.browser.IBrowserView)\ndef question_display_title(context, request):\n if context.title:\n return xml.sax.saxutils.escape(context.title)\n return context.question\n\n\n@zope.component.adapter(zeit.content.quiz.interfaces.IAnswer,\n zeit.cms.browser.interfaces.ICMSLayer)\n@zope.interface.implementer(zope.publisher.interfaces.browser.IBrowserView)\ndef answer_display_title(context, request):\n if context.title:\n return xml.sax.saxutils.escape(context.title)\n return context.answer\n", "sub_path": "src/zeit/content/quiz/browser/quiz.py", "file_name": "quiz.py", "file_ext": "py", "file_size_in_byte": 3883, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "zeit.content.quiz.i18n.MessageFactory", "line_number": 17, "usage_type": "call"}, {"api_name": "zeit.cms.browser.form.cms.content.interfaces.ICommonMetadata", "line_number": 21, "usage_type": "call"}, {"api_name": "zeit.cms.browser.form.cms", "line_number": 21, "usage_type": "attribute"}, {"api_name": "zeit.cms.browser.form", "line_number": 21, "usage_type": "name"}, {"api_name": "zope.cachedescriptors.property.cachedescriptors", "line_number": 19, "usage_type": "attribute"}, {"api_name": "zope.cachedescriptors.property", "line_number": 19, "usage_type": "name"}, {"api_name": "zope.cachedescriptors.property.formlib.form.FormFields", "line_number": 37, "usage_type": "call"}, {"api_name": "zope.cachedescriptors.property.formlib", "line_number": 37, "usage_type": "attribute"}, {"api_name": "zope.cachedescriptors.property", "line_number": 37, "usage_type": "name"}, {"api_name": "zeit.cms.browser.form.cms", "line_number": 38, "usage_type": "attribute"}, {"api_name": "zeit.cms.browser.form", "line_number": 38, "usage_type": "name"}, {"api_name": "zeit.cms.browser.form.cms", "line_number": 39, "usage_type": "attribute"}, {"api_name": "zeit.cms.browser.form", "line_number": 39, "usage_type": "name"}, {"api_name": "zope.cachedescriptors.property.formlib.form.FormFields", "line_number": 41, "usage_type": "call"}, {"api_name": "zope.cachedescriptors.property.formlib", "line_number": 41, "usage_type": "attribute"}, {"api_name": "zope.cachedescriptors.property", "line_number": 41, "usage_type": "name"}, {"api_name": "zeit.cms.browser.form.content", "line_number": 42, "usage_type": "attribute"}, {"api_name": "zeit.cms.browser.form", "line_number": 42, "usage_type": "name"}, {"api_name": "zeit.cms.browser.form.cms", "line_number": 46, "usage_type": "attribute"}, 
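# [Editor's note] The display-title adapters above fall back to
# xml.sax.saxutils.escape before a title reaches markup; that is a stdlib call and
# behaves as follows (the second argument with extra entity mappings is optional).
from xml.sax.saxutils import escape

print(escape('Q&A: is 1 < 2?'))             # Q&amp;A: is 1 &lt; 2?
print(escape('say "hi"', {'"': '&quot;'}))  # say &quot;hi&quot;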
{"api_name": "zeit.cms.browser.form", "line_number": 46, "usage_type": "name"}, {"api_name": "zeit.content.quiz.i18n.MessageFactory", "line_number": 48, "usage_type": "call"}, {"api_name": "zeit.cms.browser.form.content", "line_number": 49, "usage_type": "attribute"}, {"api_name": "zeit.cms.browser.form", "line_number": 49, "usage_type": "name"}, {"api_name": "zeit.cms.browser.form.cms", "line_number": 54, "usage_type": "attribute"}, {"api_name": "zeit.cms.browser.form", "line_number": 54, "usage_type": "name"}, {"api_name": "zeit.content.quiz.i18n.MessageFactory", "line_number": 56, "usage_type": "call"}, {"api_name": "zeit.cms.browser.form.cms", "line_number": 60, "usage_type": "attribute"}, {"api_name": "zeit.cms.browser.form", "line_number": 60, "usage_type": "name"}, {"api_name": "zeit.content.quiz.i18n.MessageFactory", "line_number": 62, "usage_type": "call"}, {"api_name": "zeit.cms.browser.form.cms", "line_number": 68, "usage_type": "attribute"}, {"api_name": "zeit.cms.browser.form", "line_number": 68, "usage_type": "name"}, {"api_name": "zope.cachedescriptors.property.formlib.form.action", "line_number": 74, "usage_type": "call"}, {"api_name": "zope.cachedescriptors.property.formlib", "line_number": 74, "usage_type": "attribute"}, {"api_name": "zope.cachedescriptors.property", "line_number": 74, "usage_type": "name"}, {"api_name": "zeit.content.quiz.i18n.MessageFactory", "line_number": 75, "usage_type": "call"}, {"api_name": "zope.cachedescriptors.property.formlib", "line_number": 75, "usage_type": "attribute"}, {"api_name": "zope.cachedescriptors.property", "line_number": 75, "usage_type": "name"}, {"api_name": "zeit.cms.browser.form.content.quiz.interfaces.IQuiz", "line_number": 95, "usage_type": "call"}, {"api_name": "zeit.cms.browser.form.content", "line_number": 95, "usage_type": "attribute"}, {"api_name": "zeit.cms.browser.form", "line_number": 95, "usage_type": "name"}, {"api_name": "zeit.content.quiz.i18n.MessageFactory", "line_number": 98, "usage_type": "call"}, {"api_name": "gocept.form.action.form.action.confirm", "line_number": 87, "usage_type": "call"}, {"api_name": "gocept.form.action.form", "line_number": 87, "usage_type": "attribute"}, {"api_name": "gocept.form.action", "line_number": 87, "usage_type": "name"}, {"api_name": "zeit.content.quiz.i18n.MessageFactory", "line_number": 88, "usage_type": "call"}, {"api_name": "zeit.content.quiz.i18n.MessageFactory", "line_number": 90, "usage_type": "call"}, {"api_name": "zope.cachedescriptors.property.formlib", "line_number": 92, "usage_type": "attribute"}, {"api_name": "zope.cachedescriptors.property", "line_number": 92, "usage_type": "name"}, {"api_name": "xml.sax.saxutils.sax.saxutils.escape", "line_number": 112, "usage_type": "call"}, {"api_name": "xml.sax.saxutils.sax", "line_number": 112, "usage_type": "attribute"}, {"api_name": "xml.sax.saxutils", "line_number": 112, "usage_type": "name"}, {"api_name": "zope.cachedescriptors.property.component.adapter", "line_number": 107, "usage_type": "call"}, {"api_name": "zope.cachedescriptors.property.component", "line_number": 107, "usage_type": "attribute"}, {"api_name": "zope.cachedescriptors.property", "line_number": 107, "usage_type": "name"}, {"api_name": "zeit.cms.browser.form.content", "line_number": 107, "usage_type": "attribute"}, {"api_name": "zeit.cms.browser.form", "line_number": 107, "usage_type": "name"}, {"api_name": "zeit.cms.browser.form.cms", "line_number": 108, "usage_type": "attribute"}, {"api_name": "zeit.cms.browser.form", "line_number": 108, "usage_type": 
"name"}, {"api_name": "zope.cachedescriptors.property.interface.implementer", "line_number": 109, "usage_type": "call"}, {"api_name": "zope.cachedescriptors.property.interface", "line_number": 109, "usage_type": "attribute"}, {"api_name": "zope.cachedescriptors.property", "line_number": 109, "usage_type": "name"}, {"api_name": "zope.cachedescriptors.property.publisher", "line_number": 109, "usage_type": "attribute"}, {"api_name": "xml.sax.saxutils.sax.saxutils.escape", "line_number": 121, "usage_type": "call"}, {"api_name": "xml.sax.saxutils.sax", "line_number": 121, "usage_type": "attribute"}, {"api_name": "xml.sax.saxutils", "line_number": 121, "usage_type": "name"}, {"api_name": "zope.cachedescriptors.property.component.adapter", "line_number": 116, "usage_type": "call"}, {"api_name": "zope.cachedescriptors.property.component", "line_number": 116, "usage_type": "attribute"}, {"api_name": "zope.cachedescriptors.property", "line_number": 116, "usage_type": "name"}, {"api_name": "zeit.cms.browser.form.content", "line_number": 116, "usage_type": "attribute"}, {"api_name": "zeit.cms.browser.form", "line_number": 116, "usage_type": "name"}, {"api_name": "zeit.cms.browser.form.cms", "line_number": 117, "usage_type": "attribute"}, {"api_name": "zeit.cms.browser.form", "line_number": 117, "usage_type": "name"}, {"api_name": "zope.cachedescriptors.property.interface.implementer", "line_number": 118, "usage_type": "call"}, {"api_name": "zope.cachedescriptors.property.interface", "line_number": 118, "usage_type": "attribute"}, {"api_name": "zope.cachedescriptors.property", "line_number": 118, "usage_type": "name"}, {"api_name": "zope.cachedescriptors.property.publisher", "line_number": 118, "usage_type": "attribute"}]} {"seq_id": "81645427", "text": "# -*- coding: utf-8 -*-\nfrom django.template import loader, RequestContext\nfrom django.contrib.auth.models import User\nfrom django.contrib.sites.models import Site\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.http import HttpResponseRedirect, HttpResponse, Http404\nfrom django.views.generic.list_detail import object_list\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom annoying.decorators import render_to\n\nfrom threadmail import threading_send_mail\n\nfrom conference.forms import ConferenceProfileFormset\nfrom feedback.views import feedback_views\nfrom article.models import ArticleItem, CategoryArticle\nfrom iskcon.models import STATUS_ORDER, Order\nfrom iskcon.forms import ProfileForm\nfrom ad_board.models import CategoryAdBoard, AdBoardItem\nfrom ad_board.forms import AddAdBoardForm\n\nfrom electronic_catalog.models import ElectronicCatalogItem\n\nfrom conference.conf import MAIL_DOMAIN\n\nfrom forum.models import Forum, Thread, Post, Subscription\n\n\nFORUM_PAGINATION = getattr(settings, 'FORUM_PAGINATION', 10)\nPAGINATION = getattr(settings, 'PAGINATION', 10)\n\n\n@render_to('index.html')\ndef index(request):\n if request.user.is_authenticated():\n cat_menu = CategoryArticle.activs.all()\n objs = ArticleItem.activs.filter(category__in = cat_menu)\n else:\n cat_menu = CategoryArticle.publics.all()\n objs = ArticleItem.publics.filter(category__in = cat_menu)\n \n return object_list( \n request,\n queryset = objs,\n paginate_by = PAGINATION,\n template_object_name = 'aticle',\n template_name = 'index.html',\n extra_context 
= {\n 'active':1,\n }\n )\n\ndef contacts(request):\n return feedback_views(request, 'contacts.html')\n \n \n##########################################################################\n##########################################################################\n\n@login_required\n@render_to('email.html')\ndef profile_email(request):\n from conference import conf\n server_url = conf.MAIL_SERVER_INTERFACE\n return HttpResponseRedirect(server_url)\n\n##########################################################################\n##########################################################################\n \n@login_required\ndef profile_change_password(request):\n if request.method == 'POST':\n form2 = PasswordChangeForm(user=request.user, data=request.POST)\n if form2.is_valid():\n form2.save()\n messages.add_message(request, messages.INFO, 'Password changed successfully.')\n return HttpResponseRedirect('/accounts/profile/')\n messages.add_message(request, messages.ERROR, 'Error changing password!')\n return HttpResponseRedirect('/accounts/profile/')\n\n##########################################################################\n##########################################################################\n\n@login_required\ndef profile_views(request):\n help_text = u''\n uc = request.user.conference_profile\n if uc.is_server_email():\n help_text = u'Server %s. Password: %s' % (uc.mail_server_interface, uc.server_email_passw)\n \n if request.method == 'POST':\n form1 = ProfileForm(request.POST, request.FILES, instance=request.user)\n formset1 = ConferenceProfileFormset(request.POST, instance=request.user)\n \n if form1.is_valid() and formset1.is_valid():\n form1.save()\n formset1.save()\n messages.add_message(request, messages.INFO, 'Data saved.')\n return HttpResponseRedirect('/accounts/profile/')\n else:\n form1 = ProfileForm(instance=request.user, initial={'last_name':request.user.last_name, 'first_name':request.user.first_name, 'email':request.user.email})\n formset1 = ConferenceProfileFormset(instance=request.user)\n \n form1.fields['email'].help_text = help_text\n form2 = PasswordChangeForm(request.user)\n\n return render_to_response('profile.html', {'form1':form1, 'formset1':formset1, 'form2':form2, 'profile_nav':1, 'mail_domain':MAIL_DOMAIN}, RequestContext(request))\n\n \n##########################################################################\n##########################################################################\n\n# User's ads\n@login_required\ndef profile_ad(request):\n t = AdBoardItem.activs.filter(user=request.user).order_by('id')\n \n return object_list( \n request,\n queryset = t,\n paginate_by = PAGINATION,\n template_object_name = 'items',\n template_name = 'ad_board/profile_ads.html',\n extra_context = {\n 'profile_nav': 2,\n }\n )\n \n# Edit a user's ad\n@login_required\n@render_to('ad_board/profile_edit_ad.html')\ndef profile_ad_edit(request, id):\n try:\n id = int(id)\n except TypeError:\n raise Http404()\n \n try:\n obj = AdBoardItem.activs.get(id=id)\n except:\n raise Http404()\n\n form = AddAdBoardForm(request.POST or None, request.FILES or None, instance=obj)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Ad edited successfully.')\n return HttpResponseRedirect(u'%s' % obj.get_profile_ad_url())\n return {\n 'form': form,\n 'profile_nav':2,\n }\n \n# Delete a user's ad\n@login_required\ndef profile_ad_delete(request, id):\n try:\n id = int(id)\n except TypeError:\n raise Http404()\n \n try:\n obj = AdBoardItem.activs.get(id=id)\n except:\n messages.add_message(request, messages.ERROR, 'Error deleting the ad! Contact the administrator.')\n else:\n obj.is_active = False\n obj.save()\n messages.add_message(request, messages.INFO, 'Ad deleted successfully.')\n\n return HttpResponseRedirect(\n reverse('profile_ad_url', args=[], kwargs={})\n )\n \n##########################################################################\n##########################################################################\n
# Forum threads created by the user\n@login_required\ndef profile_thread(request):\n t = Thread.objects.select_related().filter(author=request.user).order_by('id')\n \n return object_list( \n request,\n queryset = t,\n paginate_by = FORUM_PAGINATION,\n template_object_name = 'thread',\n template_name = 'forum/profile_thread_list.html',\n extra_context = {\n 'profile_nav': 4,\n }\n )\n \n# Forum posts created by the user\n@login_required\ndef profile_post(request):\n p = Post.objects.select_related().filter(author=request.user).order_by('time')\n \n return object_list( \n request,\n queryset = p,\n paginate_by = FORUM_PAGINATION,\n template_object_name = 'post',\n template_name = 'forum/profile_post_list.html',\n extra_context = {\n 'profile_nav': 5,\n }\n )\n\n##########################################################################\n##########################################################################\n\n\n# Place an order\n@login_required\ndef order(request, id):\n try:\n p = ElectronicCatalogItem.activs.get(id = id)\n except:\n messages.add_message(request, messages.ERROR, u'Error placing the order! Contact the administrator or try again later!')\n else:\n o = Order.objects.create(user = request.user, product = p, status = STATUS_ORDER[0][0], cost = p.cost)\n\n current_site = Site.objects.get_current()\n domain = current_site.domain\n \n users = User.objects.filter(is_staff=True, is_active=True)\n emails = [u.email for u in users]\n if emails:\n threading_send_mail('mail/order/create_msg_admin.html', u'New order on site %s' % domain, emails, {'obj':o, 'domain':domain})\n if request.user.email:\n threading_send_mail('mail/order/create_msg.html', u'Your order on site %s has been accepted' % domain, [request.user.email], {'obj':o, 'domain':domain})\n \n messages.add_message(request, messages.INFO, u'Thank you, your order has been created! You can track its status in your personal account!')\n\n return HttpResponseRedirect(request.META['HTTP_REFERER'])\n \n \n# User's orders\n@login_required\ndef profile_order(request):\n t = Order.activs.filter(user=request.user)\n \n return object_list( \n request,\n queryset = t,\n paginate_by = PAGINATION,\n template_object_name = 'items',\n template_name = 'electronic_catalog/profile_orders.html',\n extra_context = {\n 'profile_nav': 3,\n }\n )\n
", "sub_path": "www/iskcon/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 9427, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.conf.settings", "line_number": 32, "usage_type": "argument"}, {"api_name": "django.conf.settings", "line_number": 33, "usage_type": "argument"}, {"api_name": "article.models.CategoryArticle.activs.all", "line_number": 39, "usage_type": "call"}, {"api_name": "article.models.CategoryArticle.activs", "line_number": 39, "usage_type": "attribute"}, {"api_name": "article.models.CategoryArticle", "line_number": 39, "usage_type": "name"}, {"api_name": "article.models.ArticleItem.activs.filter", "line_number": 40, "usage_type": "call"}, {"api_name": "article.models.ArticleItem.activs", "line_number": 40, "usage_type": "attribute"}, {"api_name": "article.models.ArticleItem", "line_number": 40, "usage_type": "name"}, {"api_name": "article.models.CategoryArticle.publics.all", "line_number": 42, "usage_type": "call"}, {"api_name": "article.models.CategoryArticle.publics", "line_number": 42, "usage_type": "attribute"}, {"api_name": "article.models.CategoryArticle", "line_number": 42, "usage_type": "name"}, {"api_name": "article.models.ArticleItem.publics.filter", "line_number": 43, "usage_type": "call"}, {"api_name": "article.models.ArticleItem.publics", "line_number": 43, "usage_type": "attribute"}, {"api_name": "article.models.ArticleItem", "line_number": 43, "usage_type": "name"}, {"api_name": "django.views.generic.list_detail.object_list", "line_number": 45, "usage_type": "call"}, {"api_name": "annoying.decorators.render_to", "line_number": 36, "usage_type": "call"}, {"api_name": "feedback.views.feedback_views", "line_number": 57, "usage_type": "call"}, {"api_name": "conference.conf.MAIL_SERVER_INTERFACE", "line_number": 67, "usage_type": "attribute"}, {"api_name": "conference.conf", "line_number": 67, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 68, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 63, "usage_type": "name"}, {"api_name": "annoying.decorators.render_to", "line_number": 64, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.PasswordChangeForm", "line_number": 76, "usage_type": "call"}, {"api_name": "django.contrib.messages.add_message", "line_number": 79, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 79, "usage_type": "name"}, {"api_name": "django.contrib.messages.INFO", "line_number": 79, "usage_type": "attribute"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 80, "usage_type": "call"}, {"api_name": "django.contrib.messages.add_message", "line_number": 81, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 81, "usage_type": "name"}, {"api_name": "django.contrib.messages.ERROR", "line_number": 81, "usage_type": "attribute"}, 
{"api_name": "django.http.HttpResponseRedirect", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 73, "usage_type": "name"}, {"api_name": "iskcon.forms.ProfileForm", "line_number": 95, "usage_type": "call"}, {"api_name": "conference.forms.ConferenceProfileFormset", "line_number": 96, "usage_type": "call"}, {"api_name": "django.contrib.messages.add_message", "line_number": 101, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 101, "usage_type": "name"}, {"api_name": "django.contrib.messages.INFO", "line_number": 101, "usage_type": "attribute"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 102, "usage_type": "call"}, {"api_name": "iskcon.forms.ProfileForm", "line_number": 104, "usage_type": "call"}, {"api_name": "conference.forms.ConferenceProfileFormset", "line_number": 105, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.PasswordChangeForm", "line_number": 108, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 110, "usage_type": "call"}, {"api_name": "conference.conf.MAIL_DOMAIN", "line_number": 110, "usage_type": "name"}, {"api_name": "django.template.RequestContext", "line_number": 110, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 87, "usage_type": "name"}, {"api_name": "ad_board.models.AdBoardItem.activs.filter", "line_number": 119, "usage_type": "call"}, {"api_name": "ad_board.models.AdBoardItem.activs", "line_number": 119, "usage_type": "attribute"}, {"api_name": "ad_board.models.AdBoardItem", "line_number": 119, "usage_type": "name"}, {"api_name": "django.views.generic.list_detail.object_list", "line_number": 121, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 117, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 139, "usage_type": "call"}, {"api_name": "ad_board.models.AdBoardItem.activs.get", "line_number": 142, "usage_type": "call"}, {"api_name": "ad_board.models.AdBoardItem.activs", "line_number": 142, "usage_type": "attribute"}, {"api_name": "ad_board.models.AdBoardItem", "line_number": 142, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 144, "usage_type": "call"}, {"api_name": "ad_board.forms.AddAdBoardForm", "line_number": 146, "usage_type": "call"}, {"api_name": "django.contrib.messages.add_message", "line_number": 149, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 149, "usage_type": "name"}, {"api_name": "django.contrib.messages.SUCCESS", "line_number": 149, "usage_type": "attribute"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 150, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 133, "usage_type": "name"}, {"api_name": "annoying.decorators.render_to", "line_number": 134, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 162, "usage_type": "call"}, {"api_name": "ad_board.models.AdBoardItem.activs.get", "line_number": 165, "usage_type": "call"}, {"api_name": "ad_board.models.AdBoardItem.activs", "line_number": 165, "usage_type": "attribute"}, {"api_name": "ad_board.models.AdBoardItem", "line_number": 165, "usage_type": "name"}, {"api_name": "django.contrib.messages.add_message", "line_number": 167, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 167, "usage_type": "name"}, {"api_name": 
"django.contrib.messages.ERROR", "line_number": 167, "usage_type": "attribute"}, {"api_name": "django.contrib.messages.add_message", "line_number": 171, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 171, "usage_type": "name"}, {"api_name": "django.contrib.messages.INFO", "line_number": 171, "usage_type": "attribute"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 173, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 174, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 157, "usage_type": "name"}, {"api_name": "forum.models.Thread.objects.select_related", "line_number": 183, "usage_type": "call"}, {"api_name": "forum.models.Thread.objects", "line_number": 183, "usage_type": "attribute"}, {"api_name": "forum.models.Thread", "line_number": 183, "usage_type": "name"}, {"api_name": "django.views.generic.list_detail.object_list", "line_number": 185, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 181, "usage_type": "name"}, {"api_name": "forum.models.Post.objects.select_related", "line_number": 199, "usage_type": "call"}, {"api_name": "forum.models.Post.objects", "line_number": 199, "usage_type": "attribute"}, {"api_name": "forum.models.Post", "line_number": 199, "usage_type": "name"}, {"api_name": "django.views.generic.list_detail.object_list", "line_number": 201, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 197, "usage_type": "name"}, {"api_name": "electronic_catalog.models.ElectronicCatalogItem.activs.get", "line_number": 220, "usage_type": "call"}, {"api_name": "electronic_catalog.models.ElectronicCatalogItem.activs", "line_number": 220, "usage_type": "attribute"}, {"api_name": "electronic_catalog.models.ElectronicCatalogItem", "line_number": 220, "usage_type": "name"}, {"api_name": "django.contrib.messages.add_message", "line_number": 222, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 222, "usage_type": "name"}, {"api_name": "django.contrib.messages.ERROR", "line_number": 222, "usage_type": "attribute"}, {"api_name": "iskcon.models.Order.objects.create", "line_number": 224, "usage_type": "call"}, {"api_name": "iskcon.models.Order.objects", "line_number": 224, "usage_type": "attribute"}, {"api_name": "iskcon.models.Order", "line_number": 224, "usage_type": "name"}, {"api_name": "iskcon.models.STATUS_ORDER", "line_number": 224, "usage_type": "name"}, {"api_name": "django.contrib.sites.models.Site.objects.get_current", "line_number": 226, "usage_type": "call"}, {"api_name": "django.contrib.sites.models.Site.objects", "line_number": 226, "usage_type": "attribute"}, {"api_name": "django.contrib.sites.models.Site", "line_number": 226, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 229, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 229, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 229, "usage_type": "name"}, {"api_name": "threadmail.threading_send_mail", "line_number": 232, "usage_type": "call"}, {"api_name": "threadmail.threading_send_mail", "line_number": 234, "usage_type": "call"}, {"api_name": "django.contrib.messages.add_message", "line_number": 236, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 236, "usage_type": "name"}, {"api_name": 
"django.contrib.messages.INFO", "line_number": 236, "usage_type": "attribute"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 238, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 217, "usage_type": "name"}, {"api_name": "iskcon.models.Order.activs.filter", "line_number": 244, "usage_type": "call"}, {"api_name": "iskcon.models.Order.activs", "line_number": 244, "usage_type": "attribute"}, {"api_name": "iskcon.models.Order", "line_number": 244, "usage_type": "name"}, {"api_name": "django.views.generic.list_detail.object_list", "line_number": 246, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 242, "usage_type": "name"}]} {"seq_id": "337213393", "text": "import os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport librosa\nfrom cyclegan.segmentation_models.nestnet import Nestnet as Generator\nfrom cyclegan.model.vgg_model import vgg16_model as Discriminator\n\nfrom cyclegan.helpers.signal import preprocessing_fn, inverse_fn, write_audio, mag_phase_to_S, mel_spec\nfrom cyclegan.helpers.signal import mag_processing, mag_inverse, to_cqt, load_audio\nfrom cyclegan.helpers.plot import plot_heat_map\nfrom cyclegan.settings import DEFAULT_SAMPLING_RATE\nimport tensorflow as tf\n\nTEST_DIR = '/home/gtzan/ssd/test'\ninp = '/home/gtzan/data/fma_large/003/003771.mp3'\n\n# CQT\naudio = load_audio(inp, sr=DEFAULT_SAMPLING_RATE)\nmag, phase = to_cqt(audio)\nmag = mag_processing(mag, crop_hf=False)\n# plot_heat_map(mag, title='cqt')\n\n# STFT\nmag, phase = preprocessing_fn(inp)\nprint(mag.shape)\nplot_heat_map(mag, title='piano_stft')\nplot_heat_map(mel_spec(mag), title=os.path.basename(inp))\n\naudio_out = inverse_fn(mag, phase)\noutput_filename = os.path.join(TEST_DIR, f'clear_{os.path.basename(inp)}')\nwrite_audio(output_filename, audio_out, DEFAULT_SAMPLING_RATE)\n\nmag = mag_inverse(mag, phase.shape)\nS = mag_phase_to_S(mag, phase)\n\nharm, perc = librosa.decompose.hpss(S)\nharm_mag, harm_phase = librosa.magphase(harm)\nperc_mag, perc_phase = librosa.magphase(perc)\nharm_mag = mag_processing(harm_mag)\nperc_mag = mag_processing(perc_mag)\n\nplot_heat_map(harm_mag, title='harmonic')\naudio_out = inverse_fn(harm_mag, phase)\noutput_filename = os.path.join(TEST_DIR, f'harmonic_{os.path.basename(inp)}')\nwrite_audio(output_filename, audio_out, DEFAULT_SAMPLING_RATE)\n\nplot_heat_map(perc_mag, title='percussion')\naudio_out = inverse_fn(perc_mag, phase)\noutput_filename = os.path.join(TEST_DIR, f'percussion_{os.path.basename(inp)}')\nwrite_audio(output_filename, audio_out, DEFAULT_SAMPLING_RATE)\n\n\ngenerator_g = Generator(backbone_name='vgg16',\n input_shape=(None, None, 3),\n decoder_filters=(256, 128, 64, 32, 16),\n classes=3,\n activation='tanh')\n\ndiscriminator_x = Discriminator(norm_type='instancenorm', target=False)\n\nmag = mag_processing(mag)\n\nfake_x = generator_g(mag)\nplot_heat_map(fake_x, title='fake_x')\n\nplot_heat_map(mel_spec(mag), title='mel_real_x')\n\ndisc_real_x = discriminator_x(mag)\nplot_heat_map(disc_real_x, title='disc_real_x')\n\ndisc_fake_x = discriminator_x(mel_spec(fake_x))\nplot_heat_map(disc_fake_x, title='disc_fake_x')\n", "sub_path": "cyclegan/prototypes/test_model.py", "file_name": "test_model.py", "file_ext": "py", "file_size_in_byte": 2455, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "os.environ", "line_number": 3, "usage_type": "attribute"}, {"api_name": 
"cyclegan.helpers.signal.load_audio", "line_number": 18, "usage_type": "call"}, {"api_name": "cyclegan.settings.DEFAULT_SAMPLING_RATE", "line_number": 18, "usage_type": "name"}, {"api_name": "cyclegan.helpers.signal.to_cqt", "line_number": 19, "usage_type": "call"}, {"api_name": "cyclegan.helpers.signal.mag_processing", "line_number": 20, "usage_type": "call"}, {"api_name": "cyclegan.helpers.signal.preprocessing_fn", "line_number": 24, "usage_type": "call"}, {"api_name": "cyclegan.helpers.plot.plot_heat_map", "line_number": 26, "usage_type": "call"}, {"api_name": "cyclegan.helpers.plot.plot_heat_map", "line_number": 27, "usage_type": "call"}, {"api_name": "cyclegan.helpers.signal.mel_spec", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cyclegan.helpers.signal.inverse_fn", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 30, "usage_type": "call"}, {"api_name": "cyclegan.helpers.signal.write_audio", "line_number": 31, "usage_type": "call"}, {"api_name": "cyclegan.settings.DEFAULT_SAMPLING_RATE", "line_number": 31, "usage_type": "argument"}, {"api_name": "cyclegan.helpers.signal.mag_inverse", "line_number": 33, "usage_type": "call"}, {"api_name": "cyclegan.helpers.signal.mag_phase_to_S", "line_number": 34, "usage_type": "call"}, {"api_name": "librosa.decompose.hpss", "line_number": 36, "usage_type": "call"}, {"api_name": "librosa.decompose", "line_number": 36, "usage_type": "attribute"}, {"api_name": "librosa.magphase", "line_number": 37, "usage_type": "call"}, {"api_name": "librosa.magphase", "line_number": 38, "usage_type": "call"}, {"api_name": "cyclegan.helpers.signal.mag_processing", "line_number": 39, "usage_type": "call"}, {"api_name": "cyclegan.helpers.signal.mag_processing", "line_number": 40, "usage_type": "call"}, {"api_name": "cyclegan.helpers.plot.plot_heat_map", "line_number": 42, "usage_type": "call"}, {"api_name": "cyclegan.helpers.signal.inverse_fn", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 44, "usage_type": "call"}, {"api_name": "cyclegan.helpers.signal.write_audio", "line_number": 45, "usage_type": "call"}, {"api_name": "cyclegan.settings.DEFAULT_SAMPLING_RATE", "line_number": 45, "usage_type": "argument"}, {"api_name": "cyclegan.helpers.plot.plot_heat_map", "line_number": 47, "usage_type": "call"}, {"api_name": "cyclegan.helpers.signal.inverse_fn", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 49, "usage_type": "call"}, {"api_name": "cyclegan.helpers.signal.write_audio", "line_number": 50, "usage_type": "call"}, {"api_name": "cyclegan.settings.DEFAULT_SAMPLING_RATE", "line_number": 50, "usage_type": "argument"}, {"api_name": "cyclegan.segmentation_models.nestnet.Nestnet", "line_number": 53, "usage_type": "call"}, {"api_name": "cyclegan.model.vgg_model.vgg16_model", "line_number": 59, "usage_type": "call"}, {"api_name": "cyclegan.helpers.signal.mag_processing", 
"line_number": 61, "usage_type": "call"}, {"api_name": "cyclegan.helpers.plot.plot_heat_map", "line_number": 64, "usage_type": "call"}, {"api_name": "cyclegan.helpers.plot.plot_heat_map", "line_number": 66, "usage_type": "call"}, {"api_name": "cyclegan.helpers.signal.mel_spec", "line_number": 66, "usage_type": "call"}, {"api_name": "cyclegan.helpers.plot.plot_heat_map", "line_number": 69, "usage_type": "call"}, {"api_name": "cyclegan.helpers.signal.mel_spec", "line_number": 71, "usage_type": "call"}, {"api_name": "cyclegan.helpers.plot.plot_heat_map", "line_number": 72, "usage_type": "call"}]} {"seq_id": "363455087", "text": "# -*- coding: utf-8 -*-\nfrom flask import Flask\nfrom flask_restful import Api, Resource, reqparse\nfrom io import BytesIO\nfrom PIL import Image\nimport utool as ut\nimport numpy as np\nimport base64\nimport torch\nfrom torchvision import transforms\nfrom fastai.vision import pil2tensor, imagenet_stats\nfrom utils import batched_dmv, dm2cm\nfrom arch import make_new_network, L2Norm, get_device\nfrom train_VGG16 import RING_HEADS, GEM_CONST, SZH, SZW\n\n\nAPP = Flask(__name__)\nAPI = Api(APP)\n\nNETWORK_MODEL_TAG = None\nNETWORK = None\nNETWORK_VALUES = None\n\nmodel_url_dict = {\n 'crc': 'https://cthulhu.dyn.wildme.io/public/models/kaggle7.crc.final.1.pth',\n}\n\nCMODE = 'RGB'\nSIZE = (SZH, SZW)\nTFRM_RESIZE = transforms.Resize(SIZE)\nTFRM_WHITEN = transforms.Normalize(*imagenet_stats)\nTFRM_L2NORM = L2Norm()\n\n\ndef get_image_from_base64_str(image_base64_str):\n image = Image.open(BytesIO(base64.b64decode(image_base64_str)))\n return image\n\n\nclass Kaggle7(Resource):\n def post(self):\n global NETWORK_MODEL_TAG\n global NETWORK\n global NETWORK_VALUES\n\n response = {'success': False}\n\n # ut.embed()\n\n try:\n with ut.Timer('Pre'):\n parser = reqparse.RequestParser()\n parser.add_argument('image', type=str)\n parser.add_argument('config', type=dict)\n args = parser.parse_args()\n\n image_base64_str = args['image']\n image = get_image_from_base64_str(image_base64_str)\n\n config = args['config']\n model_tag = config.get('model_tag', None)\n num_returns = config.get('topk', 100)\n\n model_url = model_url_dict.get(model_tag, None)\n\n assert model_url is not None, 'Model tag %r is not recognized' % (model_tag,)\n if model_tag != NETWORK_MODEL_TAG:\n with ut.Timer('Loading network'):\n print('Loading network from weights %r' % (model_tag,))\n values_url = model_url.replace('.pth', '.values.pth')\n\n # Download files\n model_filepath = ut.grab_file_url(\n model_url, appname='kaggle7', check_hash=True\n )\n values_filepath = ut.grab_file_url(\n values_url, appname='kaggle7', check_hash=True\n )\n\n model_values = torch.load(values_filepath)\n classes = model_values['classes']\n num_classes = len(classes)\n\n model_weights = torch.load(model_filepath, map_location=get_device())\n network_model, mutliple = make_new_network(\n num_classes, RING_HEADS, GEM_CONST, pretrained=False\n )\n\n if mutliple:\n pass\n\n if torch.cuda.is_available():\n network_model = network_model.cuda()\n\n # model_weights = model_weights['model']\n network_model.load_state_dict(model_weights)\n network_model.eval()\n\n NETWORK_MODEL_TAG = model_tag\n NETWORK = network_model\n NETWORK_VALUES = model_values\n\n print('Using network %r' % (NETWORK_MODEL_TAG,))\n with ut.Timer('Loading input tensor'):\n input_image = image.convert(CMODE).convert('LA').convert(CMODE)\n input_image = TFRM_RESIZE(input_image)\n input_image = pil2tensor(input_image, np.float32)\n input_image = input_image.div_(255)\n 
input_image = TFRM_WHITEN(input_image)\n\n size = input_image.size()\n input_tensor = input_image.view(-1, size[0], size[1], size[2])\n input_tensor = input_tensor.to(get_device())\n\n # Run inference\n with ut.Timer('Inference'):\n print('Running inference on input tensor %r' % (input_tensor.size(),))\n output = NETWORK(input_tensor)\n print('...done')\n preds_list, feats_list = output\n\n with ut.Timer('Post1'):\n print('Performing post-processing')\n prediction_raw = preds_list[-1][0]\n features_raw = TFRM_L2NORM(torch.cat(feats_list, dim=1))[0]\n\n with ut.Timer('Post2'):\n print('...classifier')\n # Post Process classification\n classifier_temp = NETWORK_VALUES['thresholds']['classifier_softmax_temp']\n classifier_prediction = torch.softmax(\n prediction_raw / classifier_temp, dim=0\n )\n\n with ut.Timer('Post3'):\n # Post process features\n print('...features')\n train_feats = NETWORK_VALUES['train_feats']\n train_gt = NETWORK_VALUES['train_gt']\n size = features_raw.size()\n features = features_raw.view(-1, size[0])\n distance_matrix_imgs = batched_dmv(features, train_feats)\n distance_matrix_classes = dm2cm(distance_matrix_imgs, train_gt)\n features_sim = (2.0 - distance_matrix_classes) * 0.5\n features_sim = features_sim[0]\n\n features_temp = NETWORK_VALUES['thresholds']['feature_softmax_temp']\n features_prediction = torch.softmax(features_sim / features_temp, dim=0)\n\n with ut.Timer('Post4'):\n print('...mixing')\n p = NETWORK_VALUES['thresholds']['mixing_value']\n classifier_prediction = classifier_prediction.to('cpu')\n final_prediction = (\n p * classifier_prediction + (1.0 - p) * features_prediction\n )\n\n with ut.Timer('Collection'):\n print('Collecting prediction')\n top_k_score_list, top_k_index_list = final_prediction.topk(num_returns, 0)\n top_k_score_list = top_k_score_list.detach().tolist()\n classes = NETWORK_VALUES['classes']\n top_k_class_list = ut.take(classes, top_k_index_list)\n\n response['scores'] = {}\n for top_k_class, top_k_score in zip(top_k_class_list, top_k_score_list):\n response['scores'][top_k_class] = top_k_score\n response['success'] = True\n\n print('...done')\n except Exception as ex:\n message = str(ex)\n response['message'] = message\n print('!!!ERROR!!!')\n print(response)\n\n # if torch.cuda.is_available():\n # torch.cuda.empty_cache()\n\n return response\n\n\nAPI.add_resource(Kaggle7, '/api/classify')\n\n\nif __name__ == '__main__':\n APP.run(host='0.0.0.0', port=5000)\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 6884, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "flask.Flask", "line_number": 17, "usage_type": "call"}, {"api_name": "flask_restful.Api", "line_number": 18, "usage_type": "call"}, {"api_name": "train_VGG16.SZH", "line_number": 29, "usage_type": "name"}, {"api_name": "train_VGG16.SZW", "line_number": 29, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 30, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 30, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 31, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 31, "usage_type": "name"}, {"api_name": "fastai.vision.imagenet_stats", "line_number": 31, "usage_type": "name"}, {"api_name": "arch.L2Norm", "line_number": 32, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 36, "usage_type": "call"}, {"api_name": 
"PIL.Image", "line_number": 36, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 36, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 36, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 40, "usage_type": "name"}, {"api_name": "utool.Timer", "line_number": 51, "usage_type": "call"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 52, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 52, "usage_type": "name"}, {"api_name": "utool.Timer", "line_number": 68, "usage_type": "call"}, {"api_name": "utool.grab_file_url", "line_number": 73, "usage_type": "call"}, {"api_name": "utool.grab_file_url", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 84, "usage_type": "call"}, {"api_name": "arch.get_device", "line_number": 84, "usage_type": "call"}, {"api_name": "arch.make_new_network", "line_number": 85, "usage_type": "call"}, {"api_name": "train_VGG16.RING_HEADS", "line_number": 86, "usage_type": "argument"}, {"api_name": "train_VGG16.GEM_CONST", "line_number": 86, "usage_type": "argument"}, {"api_name": "torch.cuda.is_available", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 92, "usage_type": "attribute"}, {"api_name": "utool.Timer", "line_number": 104, "usage_type": "call"}, {"api_name": "fastai.vision.pil2tensor", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 107, "usage_type": "attribute"}, {"api_name": "arch.get_device", "line_number": 113, "usage_type": "call"}, {"api_name": "utool.Timer", "line_number": 116, "usage_type": "call"}, {"api_name": "utool.Timer", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 125, "usage_type": "call"}, {"api_name": "utool.Timer", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 131, "usage_type": "call"}, {"api_name": "utool.Timer", "line_number": 135, "usage_type": "call"}, {"api_name": "utils.batched_dmv", "line_number": 142, "usage_type": "call"}, {"api_name": "utils.dm2cm", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 148, "usage_type": "call"}, {"api_name": "utool.Timer", "line_number": 150, "usage_type": "call"}, {"api_name": "utool.Timer", "line_number": 158, "usage_type": "call"}, {"api_name": "utool.take", "line_number": 163, "usage_type": "call"}]} {"seq_id": "119655464", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django_markdown.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('routes', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='route',\n name='budget',\n field=models.CharField(default='', max_length=255, verbose_name='\\u0431\\u044e\\u0434\\u0436\\u0435\\u0442', blank=True),\n ),\n migrations.AddField(\n model_name='route',\n name='distance',\n field=models.CharField(default='', max_length=255, verbose_name='\\u043f\\u0440\\u043e\\u0442\\u044f\\u0436\\u0435\\u043d\\u043d\\u043e\\u0441\\u0442\\u044c \\u043c\\u0430\\u0440\\u0448\\u0440\\u0443\\u0442\\u0430', blank=True),\n ),\n migrations.AddField(\n model_name='route',\n name='duration',\n field=models.CharField(default='', max_length=255, verbose_name='\\u0434\\u043b\\u0438\\u0442\\u0435\\u043b\\u044c\\u043d\\u043e\\u0441\\u0442\\u044c 
\\u043f\\u0440\\u043e\\u0433\\u0443\\u043b\\u043a\\u0438', blank=True),\n ),\n migrations.AddField(\n model_name='route',\n name='finish',\n field=django_markdown.models.MarkdownField(default='', verbose_name='\\u0444\\u0438\\u043d\\u0438\\u0448'),\n ),\n migrations.AddField(\n model_name='route',\n name='preparation',\n field=django_markdown.models.MarkdownField(default='', verbose_name='\\u043f\\u043e\\u0434\\u0433\\u043e\\u0442\\u043e\\u0432\\u043a\\u0430'),\n ),\n migrations.AlterField(\n model_name='route',\n name='video_code',\n field=models.TextField(default='', verbose_name='\\u043a\\u043e\\u0434 \\u0432\\u0438\\u0434\\u0435\\u043e', blank=True),\n ),\n ]\n", "sub_path": "src/admin_app/routes/migrations/0002_auto_20150717_1304.py", "file_name": "0002_auto_20150717_1304.py", "file_ext": "py", "file_size_in_byte": 1825, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 30, "usage_type": "name"}, {"api_name": "django_markdown.models.models.MarkdownField", "line_number": 33, "usage_type": "call"}, {"api_name": "django_markdown.models.models", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django_markdown.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 35, "usage_type": "name"}, {"api_name": "django_markdown.models.models.MarkdownField", "line_number": 38, "usage_type": "call"}, {"api_name": "django_markdown.models.models", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django_markdown.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}]} {"seq_id": "264841664", "text": "#!/usr/bin/python\n\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\nimport sys\nimport time\n\n\nclass Top10Movies(MRJob):\n\n def steps(self):\n return [\n MRStep(\n
mapper=self.mapper_getMovieRatings,\n )\n ]\n\n def mapper_getMovieRatings(self, _, line):\n # each record: userId::gender::age::occupation::zipcode\n (userId,gender,age,occupation,zipcode) = line.strip().split(\"::\")\n userId=int(userId)\n if int(age) <= 20:\n yield userId, int(age)\n\n\nif __name__ == '__main__':\n start = time.time()\n Top10Movies.run()\n sys.stderr.write(\"Total Job Execution time %s secs\\n\"%(time.time()-start))\n", "sub_path": "Coding Files/MRJobs_1_problem3.py", "file_name": "MRJobs_1_problem3.py", "file_ext": "py", "file_size_in_byte": 679, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "mrjob.job.MRJob", "line_number": 9, "usage_type": "name"}, {"api_name": "mrjob.step.MRStep", "line_number": 14, "usage_type": "call"}, {"api_name": "time.time", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 31, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 31, "usage_type": "call"}]} {"seq_id": "395551313", "text": "from django.contrib.auth.models import User\n\n\nclass CrappyDBXBackend(object):\n\n def authenticate(self, dbx_user_id=None):\n if not dbx_user_id or len(dbx_user_id.strip()) == 0:\n return None\n user, _ = User.objects.get_or_create(username=dbx_user_id)\n return user\n\n def get_user(self, user_id):\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None\n", "sub_path": "spike/flow/backend.py", "file_name": "backend.py", "file_ext": "py", "file_size_in_byte": 450, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.contrib.auth.models.User.objects.get_or_create", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 9, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 14, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 14, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.DoesNotExist", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 15, "usage_type": "name"}]} {"seq_id": "583325795", "text": "import lib.interface as interface\nimport lib.arquivo as arquivo\n\narq='cursoemvideo.txt'\n\nif not arquivo.arquivoExiste(arq):\n print('File not found, creating a new one...')\n arquivo.criarArquivo(arq)\n\nwhile True:\n resposta=interface.menu(['List People','Register People',\n 'Exit the System'])\n if resposta==1:\n # List the file's contents\n arquivo.lerArquivo(arq)\n elif resposta==2:\n interface.cabecalho('New Registration')\n nome=str(input('Name: '))\n idade=interface.leiaInt('Age: ')\n arquivo.cadastrar(arq,nome,idade)\n elif resposta==3:\n print(interface.cabecalho('Leaving the System'))\n break\n else:\n print('Error: please enter a valid option.')", "sub_path": "study/curso-em-video/exercises/ex115/sistema.py", "file_name": "sistema.py", "file_ext": "py", "file_size_in_byte": 743, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "lib.arquivo.arquivoExiste", "line_number": 6, "usage_type": "call"},
{"api_name": "lib.arquivo", "line_number": 6, "usage_type": "name"}, {"api_name": "lib.arquivo.criarArquivo", "line_number": 8, "usage_type": "call"}, {"api_name": "lib.arquivo", "line_number": 8, "usage_type": "name"}, {"api_name": "lib.interface.menu", "line_number": 11, "usage_type": "call"}, {"api_name": "lib.interface", "line_number": 11, "usage_type": "name"}, {"api_name": "lib.arquivo.lerArquivo", "line_number": 15, "usage_type": "call"}, {"api_name": "lib.arquivo", "line_number": 15, "usage_type": "name"}, {"api_name": "lib.interface.cabecalho", "line_number": 17, "usage_type": "call"}, {"api_name": "lib.interface", "line_number": 17, "usage_type": "name"}, {"api_name": "lib.interface.leiaInt", "line_number": 19, "usage_type": "call"}, {"api_name": "lib.interface", "line_number": 19, "usage_type": "name"}, {"api_name": "lib.arquivo.cadastrar", "line_number": 20, "usage_type": "call"}, {"api_name": "lib.arquivo", "line_number": 20, "usage_type": "name"}, {"api_name": "lib.interface.cabecalho", "line_number": 22, "usage_type": "call"}, {"api_name": "lib.interface", "line_number": 22, "usage_type": "name"}]} {"seq_id": "217286681", "text": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport sys\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn import tree\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import f1_score, classification_report\nfrom sklearn.model_selection import StratifiedKFold\n\n\ndef load_data(filename):\n with open(filename, \"rb\") as fp:\n for i, line in enumerate(fp):\n if \"Buchung\" in str(line):\n skiprows = i\n break\n\n data = pd.read_csv(filename, skiprows=skiprows, sep=\";\", quotechar='\"', encoding=\"Cp1252\", decimal=\",\", thousands=\".\",\n dtype={\"Buchung\": np.str, \"Valuta\": np.str}, parse_dates=True)\n data.Buchung = data.Buchung.apply(lambda x: datetime.datetime.strptime(x, \"%d.%m.%Y\"))\n data.Valuta = data.Valuta.apply(lambda x: datetime.datetime.strptime(x, \"%d.%m.%Y\"))\n return data\n\n\ndef get_filenames(folder=\"D:/Dropbox/Finanzen/Ing DIBA/Input/\", extension=\"*.csv\"):\n \"\"\"\n This function gets all filenames of type \"ending\" in path \"folder\" and returns a list of all files.\n \"\"\"\n filename_list = []\n import glob, os\n os.chdir(folder)\n for file in glob.glob(extension):\n filename_list.append(file)\n return filename_list\n\n\ndef merge_files(filename_list, drop_duplicates=True, drop_labels=[\"Buchung\", \"Betrag\", \"Buchungstext\"]):\n \"\"\"\n This function takes a list of files, loads the data from those files, concatenates the DataFrames, removes\n duplicates and returns the processed DataFrame.\n \"\"\"\n data = pd.DataFrame()\n\n for filename in filename_list:\n account_number = filename.split(\"_\")[1]\n df = load_data(filename=filename)\n df[\"account_number\"] = account_number\n data = data.append(other=df)\n\n data = data.sort_values(\"Buchung\", ascending=True)\n\n # Find intial Account level (Saldo) for each account and add the total (aggregated value of each account) as a\n # baseline account level.\n first_lines_per_account = data.groupby(\"account_number\").first().iterrows()\n total_saldo_before_first_entry = 0\n for line in first_lines_per_account:\n line_saldo_before_first_entry = line[1].Saldo - line[1].Betrag\n total_saldo_before_first_entry += line_saldo_before_first_entry\n\n if drop_duplicates:\n data = data.drop_duplicates(keep=\"first\", subset=drop_labels)\n\n 
data[\"Betrag_cumsum\"] = data[\"Betrag\"].cumsum()\n data[\"Betrag_cumsum\"] += total_saldo_before_first_entry\n return data\n\n\ndef plot_account_data(giro_data, depot_data):\n from matplotlib import pyplot as plt\n import matplotlib.dates as mdates\n\n # Convert data to time series with daily values (asfreq \"D\").\n # Also add a comulated time series (giro+depot).\n first_day = min(giro_data[\"Buchung\"].min(), depot_data[\"Buchung\"].min())\n last_day = max(giro_data[\"Buchung\"].max(), depot_data[\"Buchung\"].max())\n ix = pd.DatetimeIndex(start=first_day, end=last_day, freq='D')\n\n giro_daily = giro_data.groupby(\"Buchung\").Betrag.sum()\n giro_daily = giro_daily.asfreq(\"D\").fillna(0)\n\n giro_cumulated_daily = giro_data.groupby(\"Buchung\").Betrag_cumsum.last()\n giro_cumulated_daily = giro_cumulated_daily.reindex(ix).fillna(method=\"ffill\")\n giro_cumulated_daily = giro_cumulated_daily.fillna(0)\n\n depot_cumulated_daily = depot_data[\"Betrag_cumsum\"]\n depot_cumulated_daily = depot_cumulated_daily.reindex(ix).fillna(method=\"ffill\")\n depot_cumulated_daily = depot_cumulated_daily.fillna(0)\n\n aggregated_cum_daily = giro_cumulated_daily.add(depot_cumulated_daily, fill_value=0)\n\n # Prepare figure and subplots\n f, axarr = plt.subplots(4, sharex=True)\n\n # Giro data changes\n axarr[0].set_title(\"Bookings and cumulated bookings over time\")\n axarr[0].bar(giro_daily.index, giro_daily)\n axarr[0].set_ylabel(\"Giro account changes (daily)\")\n\n # Giro data total\n axarr[1].step(giro_cumulated_daily.index, giro_cumulated_daily, color=\"blue\")\n axarr[1].set_ylabel(\"Giro total\")\n\n # Depot data total\n axarr[2].step(depot_cumulated_daily.index, depot_cumulated_daily, color=\"green\")\n axarr[2].set_ylabel(\"Depot total\")\n\n # Account plus depot total\n from sklearn.linear_model import LinearRegression\n axarr[3].step(aggregated_cum_daily.index, aggregated_cum_daily, color=\"red\", label=\"Cumulated\")\n x,y = zip(*enumerate(aggregated_cum_daily))\n fit = np.polyfit(x,y,1)\n fit_fn = np.poly1d(fit)\n axarr[3].plot(aggregated_cum_daily.index, fit_fn(x), \"--k\", label=u\"total saving rate : {:3.0f}โ‚ฌ/m\".format(365./12*fit_fn.c[0]))\n\n x_365 = x[-365:]\n y_365 = y[-365:]\n fit_365 = np.polyfit(x_365, y_365, 1)\n fit_fn_365 = np.poly1d(fit_365)\n axarr[3].plot(aggregated_cum_daily.index[-365:], fit_fn_365(x_365), \"--b\", label=u\"last year saving rate : {:3.0f}โ‚ฌ/m\".format(365./12*fit_fn_365.c[0]))\n axarr[3].set_ylabel(\"Giro + Depot (Daily)\")\n axarr[3].set_xlabel(\"Date\")\n axarr[3].legend(loc=\"best\")\n\n plt.show()\n\n\ndef parse_depot_files():\n \"\"\"\n This function browses a depot folder (a folder dedicated to contain only CSV related information).\n\n It parses each file for date and a DataFrame containing the complete stock.\n\n It appends all DataFrames plus a TimeSeries representing the total stock value at any parsing time.\n \"\"\"\n folder = \"D:/Dropbox/Finanzen/Ing DIBA/depot\"\n filenames = get_filenames(folder=folder, extension=\"*.csv\")\n data = pd.DataFrame()\n for filename in filenames:\n account_number = filename.split(\"_\")[1]\n # Get the DataFrame\n df = pd.read_csv(filename, skiprows=5, sep=\";\", quotechar='\"', encoding=\"Cp1252\", decimal=\",\", thousands=\".\",\n dtype={\"Zeit\": np.str, \"Valuta\": np.str}, parse_dates=True)\n\n # Drop any line that doesnt contain an ISIN (Stock ID)\n df = df[df.ISIN.notnull()]\n df[\"account_number\"] = account_number\n\n data = data.append(df)\n data.Zeit = data.Zeit.apply(lambda x: 
datetime.datetime.strptime(x, \"%d.%m.%Y\"))\n data = data.sort_values(\"Zeit\", ascending=True)\n\n data.drop_duplicates(keep=\"first\", inplace=True)\n\n depot_cum_value = data.groupby(\"Zeit\").Kurswert.sum()\n depot_cum_value.name = \"Betrag_cumsum\"\n\n # depot_cum_value.index.name = \"Buchung\"\n\n depot_cum_value = pd.DataFrame(depot_cum_value)\n depot_cum_value[\"Betrag\"] = depot_cum_value.diff()\n depot_cum_value[\"Betrag\"] = depot_cum_value[\"Betrag\"].fillna(0)\n depot_cum_value[\"Buchung\"] = depot_cum_value.index\n depot_cum_value = depot_cum_value.asfreq(\"D\").fillna(method=\"ffill\")\n return data, depot_cum_value\n\n\ndef get_features(dataset, vect=None, dropna=False):\n X = dataset[u\"Auftraggeber/Empfรคnger\"].str.strip() + \" | \" + dataset[\"Verwendungszweck\"].str.strip() + \" | \" + dataset[\"Buchungstext\"].str.strip()\n if dropna:\n X.dropna(inplace=True, axis=0)\n else:\n X.fillna(\"\", inplace=True, axis=0)\n\n if not vect:\n vect = TfidfVectorizer(ngram_range=(1, 3), max_df=1.)\n X_tfidf = vect.fit_transform(X)\n else:\n X_tfidf = vect.transform(X)\n return pd.DataFrame(X, columns=[\"features\"]), X_tfidf, vect\n\n\ndef get_labels(dataset):\n le = preprocessing.LabelEncoder()\n y1_ = le.fit_transform(dataset.Labels)\n return y1_, le\n\n\ndef create_classifier():\n # Predicting category based on receipient and comment\n\n # Load the labels data and reduce it to only approved rows.\n data = pd.read_excel(\"D:/Dropbox/Finanzen/Ing DIBA/output/labels.xlsx\")\n data.dropna(axis=0, how=\"any\", inplace=True)\n # data = pd.read_csv(\"D:/Dropbox/Finanzen/Ing DIBA/output/labels_.csv\", sep=\";\")\n data_labelled = data[data[\"status\"] == \"approved\"]\n data_new = data[data[\"status\"] != \"approved\"]\n X = data_labelled[\"features\"]\n y = data_labelled[\"label\"]\n X_new = data_new[\"features\"]\n\n # Use a td-idf vectorizor to convert the text data into machine readable format.\n vectorizor = TfidfVectorizer(ngram_range=(1, 3), max_df=1.)\n X_tfidf = vectorizor.fit_transform(X)\n\n # Use label enconder to transform text labels into machine readable format.\n label_encoder = preprocessing.LabelEncoder()\n y_encoded = label_encoder.fit_transform(y)\n labels = y.unique()\n labels_enc = label_encoder.transform(labels)\n\n # Classifier is a Decision Tree Classifier\n # clf = tree.DecisionTreeClassifier()\n clf = RandomForestClassifier(n_estimators=100, n_jobs=-1)\n\n # CROSSVALIDATION\n kf = StratifiedKFold(n_splits=3, shuffle=True)\n scores = []\n f1_scores = []\n for train, test in kf.split(X_tfidf, y_encoded):\n X_train_ = X_tfidf[train]\n y_train_ = y_encoded[train]\n\n X_test_ = X_tfidf[test]\n y_test_ = y_encoded[test]\n\n clf = clf.fit(X_train_, y_train_)\n y_pred_ = clf.predict(X_test_)\n\n scores.append(clf.score(X_test_, y_test_))\n f1_scores.append(f1_score(y_test_, y_pred_, average=\"weighted\"))\n\n print(\"mean scores: %3.3f\" % np.mean(scores))\n print(\"mean f1 scores: %3.3f\" % np.mean(f1_scores))\n\n y_pred = label_encoder.inverse_transform(y_pred_)\n y_test = label_encoder.inverse_transform(y_test_)\n print(\"Classification report from last fold:\")\n print(classification_report(y_test, y_pred))\n\n # PREDICT\n clf.fit(X_tfidf, y_encoded)\n\n if len(data_new)>0:\n X_new_tfidf = vectorizor.transform(X_new)\n y_new_enc = clf.predict(X_new_tfidf)\n y_new = label_encoder.inverse_transform(y_new_enc)\n data_new.ix[:,\"label\"] = y_new\n # data = data_labelled.append(other=data_new, ignore_index=True).reset_index(drop=True)\n 
\"\"\"data.to_excel(\"D:/Dropbox/Finanzen/Ing DIBA/output/labels.xlsx\", sheet_name='labels', na_rep='', header=True,\n index=False, encoding=None)\"\"\"\n\n # data.to_csv(\"D:/Dropbox/Finanzen/Ing DIBA/output/labels.csv\", sep=\";\", index=False)\n return clf, vectorizor, label_encoder\n\n\ndef classify(giro_data, clf, vectorizor, label_encoder):\n X, X_, _ = get_features(giro_data, vectorizor, dropna=False)\n y_ = clf.predict(X_)\n y = label_encoder.inverse_transform(y_)\n giro_data[\"Labels\"] = y\n\n old_labels_df = pd.read_excel(\"D:/Dropbox/Finanzen/Ing DIBA/output/labels.xlsx\")\n old_labels_df = old_labels_df[old_labels_df[\"status\"] == \"approved\"]\n\n new_labels_df = X\n new_labels_df[\"label\"] = y\n new_labels_df[\"status\"] = \"new\"\n\n old_labels_df = old_labels_df.append(new_labels_df).drop_duplicates()\n\n old_labels_df.to_csv(\"D:/Dropbox/Finanzen/Ing DIBA/output/labels_.csv\", sep=\";\", index=False)\n old_labels_df.to_excel(\"D:/Dropbox/Finanzen/Ing DIBA/output/labels.xlsx\",index=False)\n return giro_data\n\ndef more_info_on(data, label):\n subset = data[data[\"Labels\"]==label]\n subset = subset[[\"year\", \"Betrag\", \"Buchung\", \"Labels\", \"Labels2\", u\"Auftraggeber/Empfรคnger\", \"Verwendungszweck\"]].sort_values([\"Buchung\", \"Betrag\"])\n return subset\n\n#def main():\n\n\n #TODO: Plot: Target Saving Rate\n #TODO: Plot: Aggregate per month\n\n\n\n\nif __name__ == \"__main__\":\n #sys.exit(main())\n\n giro_data = merge_files(get_filenames(folder=\"D:/Dropbox/Finanzen/Ing DIBA/Input/\", extension=\"*.csv\"),\n drop_duplicates=True)\n giro_data.to_csv(\"D:/Dropbox/Finanzen/Ing DIBA/output/aggregated_giro.csv\", sep=\";\", quotechar='\"',\n encoding=\"Cp1252\", decimal=\",\")\n depot, depot_cum_value = parse_depot_files()\n plot_account_data(giro_data, depot_cum_value)\n\n clf, vectorizor, label_encoder = create_classifier()\n giro_data_classified = classify(giro_data, clf, vectorizor, label_encoder)\n giro_data_classified[\"year\"] = giro_data_classified[\"Buchung\"].apply(lambda x: x.year)\n giro_data_classified.to_csv(\"D:/Dropbox/Finanzen/Ing DIBA/output/classified_giro.csv\", sep=\";\", quotechar='\"',\n encoding=\"Cp1252\", decimal=\",\")\n\n first_day = giro_data[\"Buchung\"].min()\n last_day = giro_data[\"Buchung\"].max()\n duration = last_day - first_day\n days = duration.days\n\n print(\"Average daily expensenses over all years:\")\n grouped_all = giro_data_classified.groupby(\"Labels\").sum().sort_values(\"Betrag\")[\"Betrag\"].div(days)\n print(grouped_all)\n print(\"Sum: \", grouped_all.sum())\n\n # Only 2016\n year = 2017\n current_year = giro_data_classified[giro_data_classified[\"year\"] == year]\n first_day = current_year[\"Buchung\"].min()\n last_day = current_year[\"Buchung\"].max()\n duration = last_day - first_day\n days = duration.days\n print(\"\\ncurrent year (%i):\" % year)\n grouped_current = current_year.groupby(\"Labels\").sum().sort_values(\"Betrag\")[\"Betrag\"].div(days)\n print(grouped_current)\n print(\"Sum: \", grouped_current.sum())\n\n print(giro_data_classified.groupby(\"year\").sum().sort_index()[\"Betrag\"])\n print(giro_data.pivot_table(index=\"Labels\", columns=\"year\", values=\"Betrag\", aggfunc=\"sum\", fill_value=0))\n\n grouped = pd.DataFrame(grouped_all).join(pd.DataFrame(grouped_current), rsuffix=\"_current\" )\n grouped[\"diff_abs\"] = grouped.diff(axis=1).ix[:,-1]\n grouped[\"diff_percentage\"] = grouped[\"diff_abs\"].div(grouped[\"Betrag\"])*100\n\n print(\"\\nComparison:\")\n 
print(grouped.sort_values(\"diff_abs\"))", "sub_path": "account_main_pandas.py", "file_name": "account_main_pandas.py", "file_ext": "py", "file_size_in_byte": 13331, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.str", "line_number": 24, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 36, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 142, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.str", "line_number": 147, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 154, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 154, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 164, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 180, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 184, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 188, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 188, "usage_type": "name"}, {"api_name": "pandas.read_excel", "line_number": 197, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 207, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 211, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 211, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 218, "usage_type": "call"}, {"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 221, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 238, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 243, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 267, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 335, "usage_type": "call"}]} {"seq_id": "7947342", "text": "from 
datetime import datetime\n\nfrom peewee import *\nfrom playhouse.shortcuts import ReconnectMixin\nfrom playhouse.pool import PooledMySQLDatabase\nfrom playhouse.mysql_ext import JSONField\n\nfrom goods_srv.settings import settings\n\nclass ReconnectMySQLDatabase(ReconnectMixin, PooledMySQLDatabase):\n pass\n\ndb = ReconnectMySQLDatabase(\"mxshop_goods_srv\", host=\"192.168.0.104\", port=3306, user=\"root\", password=\"root\")\n\n# Deletion - physical delete vs. logical delete. With a physical delete, removing a user's data also loses the user's purchase records, favourites and browsing history.\n# When modifying through save(), make sure only update_time is touched and add_time stays unchanged.\nclass BaseModel(Model):\n add_time = DateTimeField(default=datetime.now, verbose_name=\"created at\")\n is_deleted = BooleanField(default=False, verbose_name=\"is deleted\")\n update_time = DateTimeField(verbose_name=\"updated at\", default=datetime.now)\n\n def save(self, *args, **kwargs):\n # Decide whether this is a newly added row or an update\n if self._pk is not None:\n # The row already exists, so only refresh update_time\n self.update_time = datetime.now()\n return super().save(*args, **kwargs)\n\n @classmethod\n def delete(cls, permanently=False): # permanently: whether to hard-delete\n if permanently:\n return super().delete()\n else:\n return super().update(is_deleted=True)\n\n def delete_instance(self, permanently=False, recursive=False, delete_nullable=False):\n if permanently:\n return self.delete(permanently).where(self._pk_expr()).execute()\n else:\n self.is_deleted = True\n self.save()\n\n @classmethod\n def select(cls, *fields):\n return super().select(*fields).where(cls.is_deleted==False)\n\n class Meta:\n database = settings.DB\n\n\nclass Category(BaseModel):\n name = CharField(max_length=20, verbose_name=\"name\")\n parent_category = ForeignKeyField(\"self\", verbose_name=\"parent category\", null=True) # top-level categories have no parent\n level = IntegerField(default=1, verbose_name=\"level\")\n is_tab = BooleanField(default=False, verbose_name=\"show as home-page tab\")\n\n\nclass Brands(BaseModel):\n # brand\n name = CharField(max_length=50, verbose_name=\"name\", index=True, unique=True)\n logo = CharField(max_length=200, null=True, verbose_name=\"logo\", default=\"\")\n\n\nclass Goods(BaseModel):\n \"\"\"\n Goods. The best way to handle distributed transactions is to design so that they never have to happen.\n \"\"\"\n category = ForeignKeyField(Category, verbose_name=\"goods category\", on_delete='CASCADE')\n brand = ForeignKeyField(Brands, verbose_name=\"brand\", on_delete='CASCADE')\n on_sale = BooleanField(default=True, verbose_name=\"is on sale\")\n goods_sn = CharField(max_length=50, default=\"\", verbose_name=\"unique goods SKU\")\n name = CharField(max_length=100, verbose_name=\"goods name\")\n click_num = IntegerField(default=0, verbose_name=\"click count\")\n sold_num = IntegerField(default=0, verbose_name=\"units sold\")\n fav_num = IntegerField(default=0, verbose_name=\"favourite count\") # stock keeping is a critical part of e-commerce\n market_price = FloatField(default=0, verbose_name=\"market price\")\n shop_price = FloatField(default=0, verbose_name=\"shop price\")\n goods_brief = CharField(max_length=200, verbose_name=\"short description\")\n ship_free = BooleanField(default=True, verbose_name=\"free shipping\")\n images = JSONField(verbose_name=\"carousel images\")\n desc_images = JSONField(verbose_name=\"detail-page images\")\n goods_front_image = CharField(max_length=200, verbose_name=\"cover image\")\n is_new =
BooleanField(default=False, verbose_name=\"is new arrival\")\n is_hot = BooleanField(default=False, verbose_name=\"is hot seller\")\n\n\nclass GoodsCategoryBrand(BaseModel):\n # brand-category mapping\n id = AutoField(primary_key=True, verbose_name=\"id\")\n category = ForeignKeyField(Category, verbose_name=\"category\")\n brand = ForeignKeyField(Brands, verbose_name=\"brand\")\n\n class Meta:\n indexes = (\n # composite unique key across category and brand\n ((\"category\", \"brand\"), True),\n )\n\n\nclass Banner(BaseModel):\n \"\"\"\n Goods shown in the home-page carousel\n \"\"\"\n image = CharField(max_length=200, default=\"\", verbose_name=\"image url\")\n url = CharField(max_length=200, default=\"\", verbose_name=\"target url\")\n index = IntegerField(default=0, verbose_name=\"carousel order\")\n\nif __name__ == \"__main__\":\n db.create_tables([Category,Goods, Brands, GoodsCategoryBrand, Banner])\n # # c1 = Category(name=\"bobby1\", level=1)\n # # c2 = Category(name=\"bobby2\", level=1)\n # # c1.save()\n # # c2.save()\n # for c in Category.select():\n # print(c.name, c.id)\n # c1 = Category.get(Category.id==1)\n # c1.delete_instance(permanently=True)\n # # Category.delete().where(Category.id==2).execute()\n\n", "sub_path": "other/mxshop_srvs/goods_srv/model/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 4849, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "playhouse.shortcuts.ReconnectMixin", "line_number": 10, "usage_type": "name"}, {"api_name": "playhouse.pool.PooledMySQLDatabase", "line_number": 10, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 18, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 20, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "name"}, {"api_name": "goods_srv.settings.settings.DB", "line_number": 48, "usage_type": "attribute"}, {"api_name": "goods_srv.settings.settings", "line_number": 48, "usage_type": "name"}, {"api_name": "playhouse.mysql_ext.JSONField", "line_number": 80, "usage_type": "call"}, {"api_name": "playhouse.mysql_ext.JSONField", "line_number": 81, "usage_type": "call"}]} {"seq_id": "466458094", "text": "import requests\nfrom os import environ\nfrom bs4 import BeautifulSoup\nfrom util.srbColour import Colour\n\nfrom util.getter import get_num, get_year, get_branch, get_college, get_c_id, get_branch_name\n\nclass Student:\n try:\n # requests expects scheme keys ('http', 'https', 'ftp'), not '*_proxy' names\n proxyDict = {\n 'http': environ['http_proxy'],\n 'https': environ['https_proxy'],\n 'ftp': environ['ftp_proxy']\n }\n except KeyError:\n proxyDict = None\n\n def __init__(self,roll):\n roll = str(roll)\n self.inflate_data(roll)\n\n def inflate_data(self,roll):\n \"\"\"\n inflate all the data that can be extracted using roll no\n \"\"\"\n self.year = get_year(roll)\n self.num = get_num(roll)\n self.branch = get_branch(roll)\n self.branch_name = get_branch_name(self.branch)\n self.c_id = get_c_id(roll)\n self.college = get_college(roll)\n self.roll_num = self.year+self.branch+self.num\n if(roll[0]=='i'):\n self.roll_num = \"iiitu\" + self.year + self.branch[3:] + self.num\n self.name = \"\"\n self.sgpa = \"\"\n self.cgpa = \"\"\n self.points = \"\"\n self.rank = '0'\n self.g_rank = '0'\n self.gender = ''\n\n def
cached_data(self,name,gender,sgpa,cgpa,points,rank,g_rank):\n self.name = name\n self.sgpa = sgpa\n self.cgpa = cgpa\n self.points = points\n self.rank = rank\n self.g_rank = g_rank\n self.gender = gender\n\n def fetch_data(self):\n try:\n url = \"http://14.139.56.15/\"+self.c_id+self.year+\"/studentresult/details.asp\"\n page = requests.post(url,data={'RollNumber':self.roll_num},proxies=Student.proxyDict,verify=False)\n soup = BeautifulSoup(page.text,'lxml')\n self.all_data = soup.find_all(class_='ewTable')\n self.name=self.all_data[0].find_all('tr')[0].find_all('td')[1].text.strip()\n self.name=self.name.upper()\n res = self.all_data[-1].find_all('tr')[1].find_all('td')\n self.sgpa = res[0].text.strip().split(\"=\")[1]\n cgpa_ = res[2].text.strip()\n self.points = cgpa_.split(\"/\")[0]\n self.cgpa = cgpa_.split(\"=\")[1]\n except Exception:\n self.name = '-'\n self.sgpa = self.points = self.cgpa = '0'\n\n def get_result(self):\n out = self.roll_num +\"\\n\\t\" + self.name + \"\\n\\t\" + self.sgpa + \"\\n\\t\" +self.points+ \"\\n\\t\" + self.cgpa\n return out\n\n def print_info(self):\n out = Colour.GREEN + \"roll_num : \" + Colour.YELLOW + self.roll_num + \"\\n\" \\\n + Colour.GREEN + \"name : \" + Colour.YELLOW + self.name + \"\\n\" \\\n + Colour.GREEN + \"rank : \" + Colour.YELLOW + self.rank + \"\\n\" \\\n + Colour.GREEN + \"gender : \" + Colour.YELLOW + self.gender + \"\\n\" \\\n + Colour.GREEN + \"branch : \" + Colour.YELLOW + self.branch + \"\\n\" \\\n + Colour.GREEN + \"g_rank : \" + Colour.YELLOW + self.g_rank + \"\\n\" \\\n + Colour.GREEN + \"points : \" + Colour.YELLOW + self.points + \"\\n\" \\\n + Colour.GREEN + \"cgpa : \" + Colour.YELLOW + self.cgpa + \"\\n\" \\\n + Colour.END\n return out\n\n", "sub_path": "util/student.py", "file_name": "student.py", "file_ext": "py", "file_size_in_byte": 3287, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "os.environ", "line_number": 11, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "name"}, {"api_name": "util.getter.get_year", "line_number": 26, "usage_type": "call"}, {"api_name": "util.getter.get_num", "line_number": 27, "usage_type": "call"}, {"api_name": "util.getter.get_branch", "line_number": 28, "usage_type": "call"}, {"api_name": "util.getter.get_branch_name", "line_number": 29, "usage_type": "call"}, {"api_name": "util.getter.get_c_id", "line_number": 30, "usage_type": "call"}, {"api_name": "util.getter.get_college", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 55, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 56, "usage_type": "call"}, {"api_name": "util.srbColour.Colour.GREEN", "line_number": 74, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour", "line_number": 74, "usage_type": "name"}, {"api_name": "util.srbColour.Colour.YELLOW", "line_number": 74, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour.GREEN", "line_number": 75, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour", "line_number": 75, "usage_type": "name"}, {"api_name": "util.srbColour.Colour.YELLOW", "line_number": 75, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour.GREEN", "line_number": 76, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour", "line_number": 76, "usage_type": "name"}, {"api_name": "util.srbColour.Colour.YELLOW", "line_number": 76,
"usage_type": "attribute"}, {"api_name": "util.srbColour.Colour.GREEN", "line_number": 77, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour", "line_number": 77, "usage_type": "name"}, {"api_name": "util.srbColour.Colour.YELLOW", "line_number": 77, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour.GREEN", "line_number": 78, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour", "line_number": 78, "usage_type": "name"}, {"api_name": "util.srbColour.Colour.YELLOW", "line_number": 78, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour.GREEN", "line_number": 79, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour", "line_number": 79, "usage_type": "name"}, {"api_name": "util.srbColour.Colour.YELLOW", "line_number": 79, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour.GREEN", "line_number": 80, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour", "line_number": 80, "usage_type": "name"}, {"api_name": "util.srbColour.Colour.YELLOW", "line_number": 80, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour.GREEN", "line_number": 81, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour", "line_number": 81, "usage_type": "name"}, {"api_name": "util.srbColour.Colour.YELLOW", "line_number": 81, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour.END", "line_number": 82, "usage_type": "attribute"}, {"api_name": "util.srbColour.Colour", "line_number": 82, "usage_type": "name"}]} {"seq_id": "40622606", "text": "# coding: utf-8\n'''\nCreated on 2016ๅนด6ๆœˆ19ๆ—ฅ\n\n@author: tack\n'''\nimport datetime\nimport logging\nimport os\nfrom logging.handlers import RotatingFileHandler\n\n\ndef __getlog(name='mylog'):\n '''\n get a logging instance by name\n '''\n LOGGING_FORMAT = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d] %(message)s'\n DATE_FORMAT = '%y%m%d %H:%M:%S'\n formatter = logging.Formatter(LOGGING_FORMAT, DATE_FORMAT)\n # formatter = logging.Formatter('%(asctime)s:%(message)s')\n log = logging.getLogger(name)\n handler = RotatingFileHandler('logs/' + name + '.log', maxBytes=50 * 1024 * 1024, backupCount=10)\n # handler = logging.FileHandler('logs/' + name + '.log')\n handler.setFormatter(formatter)\n log.addHandler(handler)\n log.setLevel(logging.INFO)\n return log\n\n\ndef __state_log(name='state'):\n '''\n create a logger instance\n :param name:\n :return:\n '''\n # LOGGING_FORMAT = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d] %(message)s'\n log = logging.getLogger('.')\n handler = logging.FileHandler(name)\n log.addHandler(handler)\n log.setLevel(logging.INFO)\n return log\n\ndef read_last_state(name='state'):\n stat = {}\n if os.path.exists('state'):\n for line in open('state', 'r'):\n arr = line.strip().split(',')\n code = arr[0]\n # dt = arr[1].replace('-', '')\n dt = arr[1]\n if code in stat:\n if dt > stat[code]:\n stat[code] = dt\n else:\n stat[code] = dt\n return stat\n\n\nmylog = __getlog()\n__state = read_last_state()\nstatelog = __state_log()\n\n\ndef get_laste_update_dt(code):\n return __state.get(code, None)\n\ndef mkdirs(path):\n '''\n make dirs\n '''\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef today_last_year(years=1):\n lasty = datetime.datetime.today().date() + datetime.timedelta(-365 * years)\n return str(lasty)\n\n\ndef day_befor(days=1):\n lasty = datetime.datetime.today().date() - datetime.timedelta(days=days)\n return str(lasty)\n\n\nif __name__ == '__main__':\n print(day_befor(10))\n", "sub_path": 
"org/tradesafe/utils/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2130, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.Formatter", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 26, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 37, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 38, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 76, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 81, "usage_type": "call"}]} {"seq_id": "200839419", "text": "import argparse \nfrom datetime import datetime\nimport os\nimport re\n\n\nclass NotesManager:\n \"\"\"\n Note file format: YYYY-MM-DD_TYPE_detail_seq.txt \n e.g. 2018-01-01-meeting-generic.txt\n 2018-01-02_one-on-one_rohit.txt\n 2018-01-02_interview_shiva-sundaram.txt\n \"\"\"\n def __init__(self):\n self.BASEDIR='./data/'\n\n\n def create_file(self, filename):\n f=open(\"./data/\" + filename,\"a+\")\n f.close()\n\n def list(self, category=None):\n if category is None: \n category = '' \n for filename in os.listdir(self.BASEDIR):\n if category in filename:\n print(filename)\n\n \n def create(self, category, detail):\n if category is None:\n category='meeting'\n if detail is None:\n detail='self'\n now = datetime.now()\n date_string= datetime.today().strftime('%Y-%m-%d')\n self.create_file('_'.join([date_string, category, detail]) + '.txt')\n\n \nif __name__ =='__main__':\n nm = NotesManager()\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-n\", \"--new\", action=\"store_true\", help=\"create a new note\\n usage: notes.py -c category detail\")\n parser.add_argument(\"-l\", \"--list\", action=\"store_true\", help=\"list most recent notes\")\n parser.add_argument(\"-o\", \"--overwrite\", action=\"store_true\", help=\"overwrite previous note\")\n parser.add_argument(\"-c\", \"--category\", help=\"category of the notes, e.g. meeting, todo, oneonone\")\n parser.add_argument(\"-d\", \"--detail\", help=\"additional details for the notes e.g. 
meeting subject, 1_1 person\")\n args = parser.parse_args()\n if args.list:\n nm.list(args.category)\n elif args.new:\n nm.create(args.category, args.detail)\n nm.list(args.category)\n else:\n nm.list(args.category)", "sub_path": "scripts/notes.py", "file_name": "notes.py", "file_ext": "py", "file_size_in_byte": 1821, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "os.listdir", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 42, "usage_type": "call"}]} {"seq_id": "327450743", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 1 14:04:55 2020\nๅ‰ตๅปบ104็ˆฌ่Ÿฒ\n@author: ่ป’ไบญ\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nxx=\"้ซ˜่–ช\" #ๆœๅฐ‹ๅทฅไฝœ้กžๅž‹ \n\nfor i in range(1,10):\n url=f\"https://www.104.com.tw/jobs/search/?ro=0&kwop=7&keyword={xx}&area=6001008000&order=11&asc=0&page={i}&mode=s\"\n aa=requests.get(url)\n bs=BeautifulSoup(aa.text, 'html.parser')\n ww=bs.find_all(class_=\"b-block--top-bord job-list-item b-clearfix js-job-item\")\n for dd in ww:\n cc=dd.find(class_=\"b-tit\")\n ee=dd.find(class_=\"b-block__left\")\n print(cc.a.text.strip())\n print(cc.a[\"href\"])\n try:\n \n print(ee.p.text.strip())\n \n except BaseException as k:\n print(k)\n print(\"---------------------------------------------------\")\n ", "sub_path": "104.py", "file_name": "104.py", "file_ext": "py", "file_size_in_byte": 849, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}]} {"seq_id": "565417885", "text": "\"\"\"\nYouTube API For POTATO BOT.\nMade By stshrewsburyDev (AKA: Steven Shrewsbury)\n\"\"\"\n\nimport argparse\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\n\ndef search(DEV_KEY, options):\n youtube = build(\"youtube\", \"v3\", developerKey=DEV_KEY)\n\n search_response = youtube.search().list(q=options.q,\n part=\"id,snippet\",\n maxResults=options.max_results\n ).execute()\n\n results = {}\n\n videos = []\n channels = []\n playlists = []\n\n for search_result in search_response.get(\"items\", []):\n if search_result[\"id\"][\"kind\"] == \"youtube#video\":\n videos.append(search_result)\n elif search_result[\"id\"][\"kind\"] == \"youtube#channel\":\n channels.append(search_result)\n elif search_result[\"id\"][\"kind\"] == \"youtube#playlist\":\n playlists.append(search_result)\n\n results[\"videos\"] = videos\n results[\"channels\"] = channels\n results[\"playlists\"] = playlists\n\n return results\n\ndef get_videos(DEV_KEY, search_query, max_results):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--q\", help=\"Serch term\", default=search_query)\n parser.add_argument(\"--max-results\", help=\"Max results\", default=max_results)\n options = parser.parse_args()\n\n return search(DEV_KEY=DEV_KEY,\n options=options)[\"videos\"]\n", "sub_path": "libs/YouTubeAPI.py", "file_name": "YouTubeAPI.py", "file_ext": "py", "file_size_in_byte": 1464, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "googleapiclient.discovery.build", "line_number": 11, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 39, "usage_type": "call"}]} {"seq_id": "171795086", "text": "# Copyright 2009-2013 Eucalyptus Systems, Inc.\n#\n# Redistribution and use of this software in source and binary forms,\n# with or without modification, are permitted provided that the following\n# conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport argparse\nimport base64\nimport datetime\nimport hashlib\nimport os.path\nfrom requestbuilder import Arg\nfrom requestbuilder.auth import BaseAuth\nfrom requestbuilder.exceptions import ArgumentError\nimport subprocess\nimport urlparse\nimport urllib\n\n\nclass EucaRsaV2Auth(BaseAuth):\n \"\"\"Provides authentication for inter-component requests\"\"\"\n\n ARGS = [Arg('--cert', metavar='FILE', help='''file containing the X.509\n certificate to use when signing requests'''),\n Arg('--privatekey', metavar='FILE',\n help='file containing the private key to sign requests with'),\n Arg('--spoof-key-id', metavar='KEY_ID',\n help='run this command as if signed by a specific access key'),\n Arg('--euca-auth', action='store_true', help=argparse.SUPPRESS)]\n\n # noinspection PyExceptionInherit\n def configure(self):\n BaseAuth.configure(self)\n if not self.args.get('spoof_key_id'):\n self.args['spoof_key_id'] = os.getenv('EC2_ACCESS_KEY')\n\n cert = self.args.get('cert') or os.getenv('EUCA_CERT')\n privkey = self.args.get('privatekey') or os.getenv('EUCA_PRIVATE_KEY')\n if not cert:\n raise ArgumentError('argument --cert or environment variable '\n 'EUCA_CERT is required')\n if not privkey:\n raise ArgumentError('argument --privatekey or environment '\n 'variable EUCA_PRIVATE_KEY is required')\n cert = os.path.expanduser(os.path.expandvars(cert))\n privkey = os.path.expanduser(os.path.expandvars(privkey))\n if not os.path.exists(cert):\n raise ArgumentError(\"certificate file '{0}' does not exist\"\n .format(cert))\n if not os.path.isfile(cert):\n raise ArgumentError(\"certificate file '{0}' is not a file\"\n .format(cert))\n if not os.path.exists(privkey):\n raise ArgumentError(\"private key file '{0}' does not exist\"\n .format(privkey))\n if not os.path.isfile(privkey):\n raise ArgumentError(\"private key file '{0}' is not a file\"\n .format(privkey))\n self.args['cert'] = cert\n 
self.args['privatekey'] = privkey\n\n def __call__(self, request):\n if request.headers is None:\n request.headers = {}\n now = datetime.datetime.utcnow()\n request.headers['Date'] = now.strftime('%Y%m%dT%H%M%SZ')\n if 'Authorization' in request.headers:\n del request.headers['Authorization']\n if self.args.get('spoof_key_id'):\n request.headers['AWSAccessKeyId'] = self.args['spoof_key_id']\n elif 'AWSAccessKeyId' in request.headers:\n del request.headers['AWSAccessKeyId']\n\n cert_fp = self._get_fingerprint()\n self.log.debug('certificate fingerprint: %s', cert_fp)\n\n headers_to_sign = self._get_headers_to_sign(request)\n signed_headers = self._get_signed_headers(headers_to_sign)\n self.log.debug('SignedHeaders:%s', signed_headers)\n\n canonical_request = self._get_canonical_request(request)\n self.log.debug('CanonicalRequest:\\n%s', canonical_request)\n signature = self._sign(canonical_request)\n self.log.debug('Signature:%s', signature)\n\n auth_header = ' '.join(('EUCA2-RSA-SHA256', cert_fp, signed_headers,\n signature))\n request.headers['Authorization'] = auth_header\n\n def _get_fingerprint(self):\n cmd = ['openssl', 'x509', '-noout', '-in', self.args['cert'],\n '-fingerprint', '-md5']\n openssl = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n (stdout, __) = openssl.communicate()\n if openssl.returncode != 0:\n raise subprocess.CalledProcessError(openssl.returncode, cmd)\n return stdout.strip().rsplit('=', 1)[-1].replace(':', '').lower()\n\n def _sign(self, canonical_request):\n digest = hashlib.sha256(canonical_request).digest()\n cmd = ['openssl', 'pkeyutl', '-sign', '-inkey',\n self.args['privatekey'], '-pkeyopt', 'digest:sha256']\n openssl = subprocess.Popen(cmd, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n (stdout, __) = openssl.communicate(digest)\n if openssl.returncode != 0:\n raise subprocess.CalledProcessError(openssl.returncode, cmd)\n return base64.b64encode(stdout)\n\n def _get_canonical_request(self, request):\n # 1. request method\n method = request.method.upper()\n # 2. CanonicalURI\n c_uri = self._get_canonical_uri(request)\n # 3. CanonicalQueryString\n c_querystr = self._get_canonical_querystr(request)\n # 4. CanonicalHeaders\n headers_to_sign = self._get_headers_to_sign(request)\n c_headers = self._get_canonical_headers(headers_to_sign)\n # 5. 
SignedHeaders\n s_headers = self._get_signed_headers(headers_to_sign)\n\n return '\\n'.join((method, c_uri, c_querystr, c_headers, s_headers))\n\n def _get_canonical_uri(self, request):\n return urlparse.urlparse(request.url).path or '/'\n\n def _get_canonical_querystr(self, request):\n params = []\n for key, val in request.params.iteritems():\n params.append('='.join((urllib.quote(key, safe='/~'),\n urllib.quote(str(val), safe='~'))))\n return '&'.join(sorted(params))\n\n def _get_headers_to_sign(self, request):\n headers = {'Host': urlparse.urlparse(request.url).netloc}\n for key, val in request.headers.iteritems():\n if key.lower() != 'authorization':\n headers[key] = val\n return headers\n\n def _get_canonical_headers(self, headers):\n header_strs = [str(key).lower().strip() + ':' + str(val).strip()\n for key, val in headers.iteritems()]\n return '\\n'.join(sorted(header_strs))\n\n def _get_signed_headers(self, headers):\n return ';'.join(sorted(header.lower().strip() for header in headers))\n", "sub_path": "euca2ools/nc/auth.py", "file_name": "auth.py", "file_ext": "py", "file_size_in_byte": 7516, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "requestbuilder.auth.BaseAuth", "line_number": 39, "usage_type": "name"}, {"api_name": "requestbuilder.Arg", "line_number": 42, "usage_type": "call"}, {"api_name": "requestbuilder.Arg", "line_number": 44, "usage_type": "call"}, {"api_name": "requestbuilder.Arg", "line_number": 46, "usage_type": "call"}, {"api_name": "requestbuilder.Arg", "line_number": 48, "usage_type": "call"}, {"api_name": "argparse.SUPPRESS", "line_number": 48, "usage_type": "attribute"}, {"api_name": "requestbuilder.auth.BaseAuth.configure", "line_number": 52, "usage_type": "call"}, {"api_name": "requestbuilder.auth.BaseAuth", "line_number": 52, "usage_type": "name"}, {"api_name": "os.path.getenv", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "name"}, {"api_name": "os.path.getenv", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "name"}, {"api_name": "os.path.getenv", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "name"}, {"api_name": "requestbuilder.exceptions.ArgumentError", "line_number": 59, "usage_type": "call"}, {"api_name": "requestbuilder.exceptions.ArgumentError", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.path.expanduser", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 64, "usage_type": "name"}, {"api_name": "os.path.path.expandvars", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.path.expanduser", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 65, "usage_type": "name"}, {"api_name": "os.path.path.expandvars", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.path.exists", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 66, "usage_type": "name"}, {"api_name": "requestbuilder.exceptions.ArgumentError", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 
69, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 69, "usage_type": "name"}, {"api_name": "requestbuilder.exceptions.ArgumentError", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.path.exists", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 72, "usage_type": "name"}, {"api_name": "requestbuilder.exceptions.ArgumentError", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 75, "usage_type": "name"}, {"api_name": "requestbuilder.exceptions.ArgumentError", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 84, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 112, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 112, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 115, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 119, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 122, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 122, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 123, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 126, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 127, "usage_type": "call"}, {"api_name": "urlparse.urlparse", "line_number": 145, "usage_type": "call"}, {"api_name": "urllib.quote", "line_number": 150, "usage_type": "call"}, {"api_name": "urllib.quote", "line_number": 151, "usage_type": "call"}, {"api_name": "urlparse.urlparse", "line_number": 155, "usage_type": "call"}]} {"seq_id": "589742194", "text": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright ยฉ 2022 Pi-Yueh Chuang \n#\n# Distributed under terms of the BSD 3-Clause license.\n\n\"\"\"Total losses versus training iterations.\n\"\"\"\nimport sys\nimport pathlib\nfrom matplotlib import pyplot\n\n# find helpers\nsys.path.insert(0, str(pathlib.Path(__file__).resolve().parents[1].joinpath(\"modulus\")))\nfrom helpers.tbreader import read_tensorboard_data # pylint: disable=import-error # noqa: E402\n\n# matplotlib configuration\npyplot.rcParams.update({\n \"text.usetex\": True,\n \"font.family\": \"serif\",\n \"font.serif\": [\"P052\", \"Pagella\", \"Palatino\", \"Palatino Linotype\", \"Times New Roman\"],\n \"figure.constrained_layout.use\": True,\n})\n\n# directories\nrootdir = pathlib.Path(__file__).resolve().parents[1]\nmodulusdir = rootdir.joinpath(\"modulus\", \"cylinder-2d-re200-zero-ic\")\nrootdir.joinpath(\"figures\").mkdir(exist_ok=True)\n\n# read data\ndata1 = read_tensorboard_data(modulusdir.joinpath(\"nn_256\", \"outputs\"))\ndata2 = read_tensorboard_data(modulusdir.joinpath(\"nn_512\", \"outputs\"))\n\n# plot\nfig, ax = pyplot.subplots(1, 1, figsize=(6, 3), dpi=166)\nfig.suptitle(\"Convergence history: total loss v.s. 
iteration\")\nax.semilogy(data1[\"step\"], data1[\"loss\"].ewm(span=10).mean(), label=\"256 neurons per layer\")\nax.semilogy(data2[\"step\"], data2[\"loss\"].ewm(span=10).mean(), label=\"512 neurons per layer\")\nax.set_xlabel(\"Iteration\")\nax.set_ylabel(\"Total loss\")\nax.legend(loc=0)\nfig.savefig(rootdir.joinpath(\"figures\", \"cylinder-pinn-training-convergence.png\"), bbox_inches=\"tight\", dpi=166)\n", "sub_path": "postprocessing/cylinder_pinn_training_convergence.py", "file_name": "cylinder_pinn_training_convergence.py", "file_ext": "py", "file_size_in_byte": 1572, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "sys.path.insert", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 20, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 28, "usage_type": "call"}, {"api_name": "helpers.tbreader.read_tensorboard_data", "line_number": 33, "usage_type": "call"}, {"api_name": "helpers.tbreader.read_tensorboard_data", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}]} {"seq_id": "531561312", "text": "# Problem 0004: for any plain-text file in English, count how many times each word appears\n# Author : weijiansun\n# -*-coding:utf-8-*-\n\nimport collections\nimport re\nimport sys\n\ndef count(filename = 'test.txt'):\n    file=open(filename,'r')\n    data=file.read()\n    dic=collections.defaultdict(lambda:0)\n    data =re.sub(r'[\\\W\\\d]',' ',data)\n    datalist=data.split(' ')\n    for item in datalist:\n        dic[item]+=1\n    dic.pop('', None) # drop the empty-string token from splitting; del dic[''] would raise KeyError if none exists\n    return dic\n\t\nif __name__ ==\"__main__\":\n    print(sorted(count().items()))\n    \n\n", "sub_path": "Show me the code/0004/0004.py", "file_name": "0004.py", "file_ext": "py", "file_size_in_byte": 513, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "collections.defaultdict", "line_number": 12, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 13, "usage_type": "call"}]} {"seq_id": "301183266", "text": "import random\r\nimport time\r\nimport json\r\nimport discord\r\n\r\nclass character():\r\n    # Parameterized constructor for character class object\r\n    def __init__(self, name, level, battleclass):\r\n        self.name = name\r\n        self.level = level\r\n        self.battleclass = battleclass\r\n        self.stats = [] # Possible dict conversion with format: {\"Health\" : \"\" , \"Attack\" : \"\" , \"Defense\" : \"\" , \"Speed\" : \"\" , \"Critical\" : \"\"}\r\n        self.health = 100\r\n\r\n        # Assigns character object stats variable based on characterClass\r\n        if battleclass == \"warrior\":\r\n            self.attack = random.randint(1, 20)\r\n            self.defense = random.randint(20, 20)\r\n            self.speed = random.randint(1, 20)\r\n            self.critical = random.randint(1, 20)\r\n            self.stats.insert(0, self.health)\r\n            self.stats.insert(1, self.attack)\r\n            self.stats.insert(2, self.defense)\r\n            self.stats.insert(3, self.speed)\r\n            self.stats.insert(4, self.critical)\r\n\r\n        elif battleclass == \"ranger\":\r\n            self.attack = random.randint(1, 20)\r\n            
self.defense = random.randint(1, 20)\r\n self.speed = random.randint(20, 20)\r\n self.critical = random.randint(1, 20)\r\n self.stats.insert(0, self.health)\r\n self.stats.insert(1, self.attack)\r\n self.stats.insert(2, self.defense)\r\n self.stats.insert(3, self.speed)\r\n self.stats.insert(4, self.critical)\r\n\r\n elif battleclass == \"castor\":\r\n self.attack = random.randint(20, 20)\r\n self.defense = random.randint(1, 20)\r\n self.speed = random.randint(1, 20)\r\n self.critical = random.randint(1, 20)\r\n self.stats.insert(0, self.health)\r\n self.stats.insert(1, self.attack)\r\n self.stats.insert(2, self.defense)\r\n self.stats.insert(3, self.speed)\r\n self.stats.insert(4, self.critical)\r\n\r\n # Prints the details of the character object\r\n def printDetails(self):\r\n embed = discord.Embed(title=\"Character Stats:\", color=0x00ff00)\r\n embed.set_thumbnail(url=\"https://orig00.deviantart.net/f025/f/2017/277/f/2/ffbe___cloud_strife_gif_2_by_zerolympiustrife-dbpjw5t.gif\")\r\n embed.add_field(name=\"Attack\", value=str(self.attack), inline=True)\r\n embed.add_field(name=\"Defense\", value=str(self.defense), inline=True)\r\n embed.add_field(name=\"Speed\", value=str(self.speed), inline=True)\r\n embed.add_field(name=\"Luck\", value=str(self.critical), inline=True)\r\n embed.add_field(name=\"Level\", value=str(self.level), inline=True)\r\n return embed\r\n\r\n # Series of accessors and mutators for character objects\r\n def setName(self, name):\r\n self.name = name\r\n\r\n def getName(self):\r\n return self.name\r\n\r\n def setLevel(self, level):\r\n self.level = level\r\n\r\n def getLevel(self):\r\n return self.level\r\n\r\n def setBattleclass(self, battleclass):\r\n self.battleclass = battleclass\r\n\r\n def getBattleclass(self):\r\n return self.battleclass\r\n\r\n # Returns a specific stat based on index ({0 - 4} / {Health - Critical})\r\n def getStats(self, value):\r\n return self.stats[value:value + 1]\r\n\r\n\r\n# Begin user adventure with character object creation\r\n# characterName = input(\"\\nEnter character name:\\n\")\r\n#\r\n#\r\n# tempLevel = int(input(\"\\nEnter character level:\\n\"))\r\n# if tempLevel != 1:\r\n# boolLevelFlag = False\r\n# else:\r\n# characterLevel = tempLevel\r\n# boolLevelFlag = True\r\n#\r\n# # Validate whether user has access to override character default level\r\n# while not boolLevelFlag:\r\n# print(\"Input Alert: You are trying to create a character level higher than 1.\")\r\n# passwordToken = input(\"\\nEnter your admin password:\\n\")\r\n#\r\n# if passwordToken == \"admin\":\r\n# print(\"Access Granted!\")\r\n# characterLevel = tempLevel\r\n# boolLevelFlag = True\r\n#\r\n# else:\r\n# print(\"Access Denied!\")\r\n# tempLevel = 1\r\n# characterLevel = tempLevel\r\n# boolLevelFlag = True\r\n#\r\n#\r\n# print(\"\\nValid battle classes:\"\r\n# \"\\nWarrior: Specializes in defense.\"\r\n# \"\\nRanger: Specializes in speed.\"\r\n# \"\\nCastor: Specializes in attack.\")\r\n#\r\n# tempBattleClass = input(\"\\nEnter your battle class:\\n\")\r\n#\r\n# # Validate user input for battle class\r\n# while not (tempBattleClass.lower() == \"warrior\" or tempBattleClass.lower() == \"ranger\"\r\n# or tempBattleClass.lower() == \"castor\"):\r\n# print(\"You must select a valid battle class.\")\r\n# tempBattleClass = input(\"\\nEnter your battle class:\\n\")\r\n#\r\n# characterClass = tempBattleClass.lower()\r\n\r\n\r\n# Function to create one of 3 random enemies using RNG\r\ndef randomEnemy(level):\r\n rng = random.randint(1, 3)\r\n if rng == 1:\r\n 
characterName = \"Goblin\"\r\n characterLevel = level\r\n characterClass = \"warrior\"\r\n\r\n elif rng == 2:\r\n characterName = \"Skeleton\"\r\n characterLevel = level\r\n characterClass = \"ranger\"\r\n\r\n else:\r\n characterName = \"Witch\"\r\n characterLevel = level\r\n characterClass = \"castor\"\r\n\r\n return character(characterName, characterLevel, characterClass)\r\n\r\n# ------------------ #\r\n# UNDER CONSTRUCTION #\r\n# ------------------ #\r\n\r\n# Begin user battle sequence\r\n# def battleSequence(character, enemy):\r\n# print(\"\\n\" + enemy.name + \" appears and challenges \" + character.name + \" to battle!\")\r\n# enemy.printDetails()\r\n#\r\n# while character.health > 0 and enemy.health > 0:\r\n# if character.health > 100:\r\n# character.health = 100\r\n#\r\n# if enemy.health > 100:\r\n# enemy.health = 100\r\n#\r\n# print(\"\\n\" + character.name + \"'s Health: \" + str(character.health))\r\n# print(enemy.name + \"'s Health: \" + str(enemy.health))\r\n#\r\n# print(\"\\nNext move:\\n1. Attack\\n2. Defend\\n3. Flee\\n\")\r\n# choice = int(input(\"\\nEnter the number of your choice:\\n\"))\r\n#\r\n# # Validate user choice\r\n# while not (choice == 1 or choice == 2 or choice == 3):\r\n# print(\"\\nPlease enter a valid choice!\\n\")\r\n# choice = int(input(\"\\nEnter the number of your choice:\\n\"))\r\n#\r\n# # Determine enemy turn\r\n# rng = random.randint(1, 2)\r\n#\r\n# # Situation 1: character -> ATTACK and enemy -> ATTACK\r\n# if choice == 1 and rng == 1:\r\n#\r\n# # Situation 1a: character attacks first\r\n# if character.speed > enemy.speed:\r\n# print(character.name + \" attacks first with Speed: \" + str(character.speed) + \"!\")\r\n# enemyDmgTaken = character.attack\r\n# critRoll = random.randint(1, 100)\r\n#\r\n# if critRoll <= character.critical:\r\n# print(\"It's super effective!\")\r\n# enemyDmgTaken += (character.attack / 2)\r\n#\r\n# print(character.name + \" strikes \" + enemy.name + \" for \" + str(enemyDmgTaken) + \" points of damage!\")\r\n# enemy.health -= enemyDmgTaken\r\n# print(enemy.name + \"'s Health: \" + str(enemy.health) + \"\\n\")\r\n#\r\n# # enemy attacks second\r\n# print(enemy.name + \" attacks second with Speed: \" + str(enemy.speed) + \"!\")\r\n# characterDmgTaken = enemy.attack\r\n# critRoll = random.randint(1, 100)\r\n#\r\n# if critRoll <= enemy.critical:\r\n# print(\"It's super effective!\")\r\n# characterDmgTaken += (enemy.attack / 2)\r\n#\r\n# print(enemy.name + \" strikes \" + character.name + \" for \" + str(characterDmgTaken) + \" points of damage!\")\r\n# character.health -= characterDmgTaken\r\n# print(character.name + \"'s Health: \" + str(character.health) + \"\\n\")\r\n#\r\n# # Situation 1b: enemy attacks first\r\n# else:\r\n# print(enemy.name + \" attacks first with Speed: \" + str(enemy.speed) + \"!\")\r\n# characterDmgTaken = enemy.attack\r\n# critRoll = random.randint(1, 100)\r\n#\r\n# if critRoll <= enemy.critical:\r\n# print(\"It's super effective!\")\r\n# characterDmgTaken += (enemy.attack / 2)\r\n#\r\n# print(enemy.name + \" strikes \" + character.name + \" for \" + str(characterDmgTaken) + \" points of damage!\")\r\n# character.health -= characterDmgTaken\r\n# print(character.name + \"'s Health: \" + str(character.health) + \"\\n\")\r\n#\r\n# # character attacks second\r\n# print(character.name + \" attacks second with Speed: \" + str(character.speed) + \"!\")\r\n# enemyDmgTaken = character.attack\r\n# critRoll = random.randint(1, 100)\r\n#\r\n# if critRoll <= character.critical:\r\n# print(\"It's super 
effective!\")\r\n# enemyDmgTaken += (character.attack / 2)\r\n#\r\n# print(character.name + \" strikes \" + enemy.name + \" for \" + str(enemyDmgTaken) + \" points of damage!\")\r\n# enemy.health -= enemyDmgTaken\r\n# print(enemy.name + \"'s Health: \" + str(enemy.health) + \"\\n\")\r\n#\r\n# # Situation 2: character -> ATTACK and enemy -> DEFEND\r\n# if choice == 1 and rng == 2:\r\n#\r\n# # character damages enemy after factoring in enemy's defense\r\n# print(character.name + \" attacks \" + enemy.name + \"!\")\r\n# enemyDmgTaken = character.attack\r\n# critRoll = random.randint(1, 100)\r\n# if critRoll <= character.critical:\r\n# print(\"It's super effective!\")\r\n# enemyDmgTaken += (character.attack / 2)\r\n#\r\n# print(enemy.name + \" defends against \" + character.name + \"'s attack!\")\r\n# enemyDmgTaken -= enemy.defense\r\n# critRoll = random.randint(1, 100)\r\n# if critRoll <= enemy.critical:\r\n# print(\"It's super effective!\")\r\n# enemyDmgTaken -= (enemy.defense / 2)\r\n#\r\n# if enemyDmgTaken > 0:\r\n# print(character.name + \" strikes \" + enemy.name + \" for \" + str(enemyDmgTaken) + \" points of damage!\")\r\n# enemy.health -= enemyDmgTaken\r\n#\r\n# elif enemyDmgTaken < 0:\r\n# print(enemy.name + \" blocks all damage from \" + character.name + \"!\")\r\n# print(\"+5 Health to \" + enemy.name)\r\n# enemy.health += 5\r\n#\r\n# else:\r\n# print(enemy.name + \" blocks all damage from \" + character.name + \"!\")\r\n#\r\n# # Situation 3: character -> DEFEND and enemy -> ATTACK\r\n# if choice == 2 and rng == 1:\r\n#\r\n# # enemy damages character after factoring in character's defense\r\n# print(enemy.name + \" attacks \" + character.name + \"!\")\r\n# characterDmgTaken = enemy.attack\r\n# critRoll = random.randint(1, 100)\r\n# if critRoll <= enemy.critical:\r\n# print(\"It's super effective!\")\r\n# characterDmgTaken += (enemy.attack / 2)\r\n#\r\n# print(character.name + \" defends against \" + enemy.name + \"'s attack!\")\r\n# characterDmgTaken -= character.defense\r\n# critRoll = random.randint(1, 100)\r\n# if critRoll <= character.critical:\r\n# print(\"It's super effective!\")\r\n# characterDmgTaken -= (character.defense / 2)\r\n#\r\n# if characterDmgTaken > 0:\r\n# print(enemy.name + \" strikes \" + character.name + \" for \" + str(characterDmgTaken) + \" points of damage!\")\r\n# character.health -= characterDmgTaken\r\n#\r\n# elif characterDmgTaken < 0:\r\n# print(character.name + \" blocks all damage from \" + enemy.name + \"!\")\r\n# print(\"+5 Health to \" + character.name)\r\n# character.health += 5\r\n#\r\n# else:\r\n# print(character.name + \" blocks all damage from \" + enemy.name + \"!\")\r\n#\r\n# print(character.name + \"'s Health: \" + str(character.health))\r\n#\r\n# # Situation 4: character -> DEFEND and character -> DEFEND\r\n# if choice == 2 and rng == 2:\r\n#\r\n# print(character.name + \" and \" + enemy.name +\" both defend against nothing?!\\n\")\r\n# print(\"Both adversaries begin to taunt each other from a distance!\")\r\n# print(\"The adrenaline from taunting causes both entities to heal a small amount of health!\")\r\n# print(\"\\n+5 Health to both \" + character.name + \" and \" + enemy.name + \"!\")\r\n# character.health += 5\r\n# enemy.health += 5\r\n#\r\n# # Situation 5: character -> FLEE and enemy -> ATTACK\r\n# if choice == 3 and rng == 1:\r\n#\r\n# print(character.name + \" attempts to flee from the battle!\\n\")\r\n# time.sleep(2)\r\n#\r\n# # Roll to determine outcome of attempted FLEE\r\n# fleeRoll = random.randint(1, 100)\r\n# if fleeRoll 
<= character.speed:\r\n# print(\"Success!\\n\")\r\n# print(character.name + \" flees from the battle to live another day!\")\r\n# enemy.health = 0\r\n#\r\n# else:\r\n# print(\"Fail!\\n\")\r\n# print(character.name + \" tries to flee but trips and lands on their face...\")\r\n# print(\"-5 Health to \" + character.name + \"\\n\")\r\n# character.health -= 5\r\n#\r\n# # enemy attacks character\r\n# print(enemy.name + \" attacks \" + character.name + \"!\")\r\n# characterDmgTaken = enemy.attack\r\n# critRoll = random.randint(1, 100)\r\n#\r\n# if critRoll <= enemy.critical:\r\n# print(\"It's super effective!\")\r\n# characterDmgTaken += (enemy.attack / 2)\r\n#\r\n# print(enemy.name + \" strikes \" + character.name + \" for \" + str(characterDmgTaken) + \" points of damage!\")\r\n# character.health -= characterDmgTaken\r\n# print(character.name + \"'s Health: \" + str(character.health) + \"\\n\")\r\n#\r\n# # Situation 6: character -> FLEE and enemy -> DEFEND\r\n# if choice == 3 and rng == 2:\r\n#\r\n# print(character.name + \" attempts to flee from the battle!\\n\")\r\n# time.sleep(2)\r\n#\r\n# # Roll to determine outcome of attempted FLEE\r\n# fleeRoll = random.randint(1, 100)\r\n# if fleeRoll <= character.speed:\r\n# print(\"Success!\\n\")\r\n# print(character.name + \" flees from the battle to live another day!\")\r\n# enemy.health = 0\r\n#\r\n# else:\r\n# print(\"Fail!\\n\")\r\n# print(character.name + \" tries to flee but trips and lands on their face...\")\r\n# print(enemy.name + \" laughs at \" + character.name + \"'s failure!\\n\")\r\n# print(\"The shame causes \" + character.name + \" to lose additional health...\")\r\n# print(\"-10 Health to \" + character.name)\r\n# character.health -= 10\r\n# print(character.name + \"'s Health: \" + str(character.health) + \"\\n\")\r\n#\r\n#\r\n# print(\"\\nEND OF BATTLE!\\n\")\r\n#\r\n#\r\n# if character.health > 0:\r\n# print(character.name + \" wins the battle against \" + enemy.name + \"!\\n\")\r\n#\r\n# else:\r\n# print(character.name + \" loses the battle against \" + enemy.name + \"!\\n\")\r\n\r\n\r\n# Reading from JSON file\r\n# with open(\"data.txt\") as json_file:\r\n# data = json.load(json_file)\r\n# for x in data['character']:\r\n# print(\"Character Name: \" + x[\"name\"])\r\n# print(\"Character Level: \" + x[\"level\"])\r\n# print(\"Character Class: \") + x[\"battleclass\"]\r\n# print(\"Character Stats: \" + x[\"stats\"] + \"\\n\")\r\n\r\n# Saving to JSON file\r\n# data = {}\r\n# data['character'] = []\r\n# data['character'].append({\r\n# 'name': characterName,\r\n# 'level': characterLevel,\r\n# 'battleclass': characterClass,\r\n# 'stats': character.stats\r\n# })\r\n#\r\n# with open(\"data.txt\", \"w\") as outfile:\r\n# json.dump(data, outfile)\r\n\r\n\r\n# char1 = character(characterName, characterLevel, characterClass)\r\n# char1.printDetails()\r\n#\r\n# time.sleep(2) # Pause between displaying character details\r\n#\r\n# enemy1 = randomEnemy()\r\n#\r\n# time.sleep(2) # Pause before first battle sequence\r\n#\r\n# battleSequence(char1, enemy1)\r\n\r\n", "sub_path": "Game.py", "file_name": "Game.py", "file_ext": "py", "file_size_in_byte": 16526, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "random.randint", "line_number": 17, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 18, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 19, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 20, "usage_type": 
"call"}, {"api_name": "random.randint", "line_number": 28, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 29, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 30, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 31, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 39, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 40, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 41, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 42, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 51, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 130, "usage_type": "call"}]} {"seq_id": "595274881", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib import admin\nfrom app import models\n\n# Register your models here.\ndef register_admin(model):\n \"\"\"Turn admin.site.register into a decorator.\"\"\"\n def wrapper(klass):\n admin.site.register(model, klass)\n return klass\n return wrapper\n\n@admin.register(models.journal_submission)\nclass JournalSubmissionAdmin(admin.ModelAdmin):\n list_display = ('user','created')\n list_filter = ('created',)\n search_fields = ['user']\n\n@admin.register(models.embed)\nclass EmbedAdmin(admin.ModelAdmin):\n list_display = ('summary','url','source','message')\n list_filter = ('source',)\n search_fields = ['summary']\n\n@admin.register(models.question)\nclass QuestionAdmin(admin.ModelAdmin):\n list_display = ('question','category')\n list_filter = ('category',)\n search_fields = ['question']\n", "sub_path": "app/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 893, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.contrib.admin.site.register", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 11, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 16, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 15, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 15, "usage_type": "name"}, {"api_name": "app.models.journal_submission", "line_number": 15, "usage_type": "attribute"}, {"api_name": "app.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 22, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 21, "usage_type": "name"}, {"api_name": "app.models.embed", "line_number": 21, "usage_type": "attribute"}, {"api_name": "app.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 28, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 27, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 27, "usage_type": "name"}, {"api_name": "app.models.question", "line_number": 27, "usage_type": "attribute"}, {"api_name": 
"app.models", "line_number": 27, "usage_type": "name"}]} {"seq_id": "335507748", "text": "#!/usr/bin/python3\n\"\"\"\nTest for the City class\n\"\"\"\n\n\nfrom datetime import datetime\nimport inspect\nfrom models import city\nfrom models.base_model import BaseModel\nimport pep8\nimport unittest\nCity = city.City\n\nc = \"created_at\"\nu = \"updated_at\"\n\n\nclass TestStateDocStyle(unittest.TestCase):\n \"\"\"test for documentation and pep8 style\"\"\"\n def setUp(cls):\n \"\"\"set up for methods\"\"\"\n cls.city_func = inspect.getmembers(City, inspect.isfunction)\n\n def test_pep8(self):\n \"\"\"test pep8 in City class\"\"\"\n pep8s = pep8.StyleGuide(quiet=True)\n result = pep8s.check_files(['models/city.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors and warnings.\")\n\n def test_pep8_test(self):\n \"\"\"test pep8 for test to City class\"\"\"\n pep8s = pep8.StyleGuide(quiet=True)\n result = pep8s.check_files(['tests/test_models/test_city.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors and warnings.\")\n\n def test_module_doc(self):\n \"\"\"test for the doc in module\"\"\"\n self.assertIsNot(city.__doc__, None,\n \"city.py without a docstring\")\n self.assertTrue(len(city.__doc__) >= 1,\n \"city.py without a docstring\")\n\n def test_class_doc(self):\n \"\"\"test for class documentation\"\"\"\n self.assertIsNot(City.__doc__, None,\n \"City class without docstring\")\n self.assertTrue(len(City.__doc__) >= 1,\n \"City class without docstring\")\n\n def test_func_doc(self):\n \"\"\"test for doc in methods\"\"\"\n for func in self.city_func:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} docstring needed in method\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} docstring needed in method\".format(func[0]))\n\n\nclass TestCityClass(unittest.TestCase):\n \"\"\"test for class\"\"\"\n def test_is_subclass(self):\n \"\"\"test for a subclass of BaseModel\"\"\"\n city = City()\n self.assertIsInstance(city, BaseModel)\n self.assertTrue(hasattr(city, \"id\"))\n self.assertTrue(hasattr(city, \"created_at\"))\n self.assertTrue(hasattr(city, \"updated_at\"))\n\n def test_state_id(self):\n \"\"\"thest for for attribute state id for City class\"\"\"\n city = City()\n self.assertTrue(hasattr(city, \"state_id\"))\n self.assertEqual(city.state_id, \"\")\n\n def test_name(self):\n \"\"\"test for the state_id attr\"\"\"\n city = City()\n self.assertTrue(hasattr(city, \"name\"))\n self.assertEqual(city.name, \"\")\n\n def test_to_dict_values(self):\n \"\"\"test the values in dict\"\"\"\n time = \"%Y-%m-%dT%H:%M:%S.%f\"\n city = City()\n new_dict = city.to_dict()\n self.assertEqual(new_dict[\"__class__\"], \"City\")\n self.assertEqual(type(new_dict[c]), str)\n self.assertEqual(type(new_dict[u]), str)\n self.assertEqual(new_dict[c], city.created_at.strftime(time))\n self.assertEqual(new_dict[u], city.updated_at.strftime(time))\n\n def test_str(self):\n \"\"\"test for output str method\"\"\"\n city = City()\n string = \"[City] ({}) {}\".format(city.id, city.__dict__)\n self.assertEqual(string, str(city))\n", "sub_path": "tests/test_models/test_city.py", "file_name": "test_city.py", "file_ext": "py", "file_size_in_byte": 3381, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "models.city.City", "line_number": 13, "usage_type": "attribute"}, {"api_name": "models.city", "line_number": 13, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 
19, "usage_type": "attribute"}, {"api_name": "inspect.getmembers", "line_number": 23, "usage_type": "call"}, {"api_name": "inspect.isfunction", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pep8.StyleGuide", "line_number": 27, "usage_type": "call"}, {"api_name": "pep8.StyleGuide", "line_number": 34, "usage_type": "call"}, {"api_name": "models.city.__doc__", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.city", "line_number": 41, "usage_type": "name"}, {"api_name": "models.city.__doc__", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.city", "line_number": 43, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 62, "usage_type": "attribute"}, {"api_name": "models.city", "line_number": 66, "usage_type": "name"}, {"api_name": "models.city", "line_number": 67, "usage_type": "argument"}, {"api_name": "models.base_model.BaseModel", "line_number": 67, "usage_type": "argument"}, {"api_name": "models.city", "line_number": 68, "usage_type": "argument"}, {"api_name": "models.city", "line_number": 69, "usage_type": "argument"}, {"api_name": "models.city", "line_number": 70, "usage_type": "argument"}, {"api_name": "models.city", "line_number": 74, "usage_type": "name"}, {"api_name": "models.city", "line_number": 75, "usage_type": "argument"}, {"api_name": "models.city.state_id", "line_number": 76, "usage_type": "attribute"}, {"api_name": "models.city", "line_number": 76, "usage_type": "name"}, {"api_name": "models.city", "line_number": 80, "usage_type": "name"}, {"api_name": "models.city", "line_number": 81, "usage_type": "argument"}, {"api_name": "models.city.name", "line_number": 82, "usage_type": "attribute"}, {"api_name": "models.city", "line_number": 82, "usage_type": "name"}, {"api_name": "models.city", "line_number": 87, "usage_type": "name"}, {"api_name": "models.city.to_dict", "line_number": 88, "usage_type": "call"}, {"api_name": "models.city", "line_number": 88, "usage_type": "name"}, {"api_name": "models.city.created_at.strftime", "line_number": 92, "usage_type": "call"}, {"api_name": "models.city.created_at", "line_number": 92, "usage_type": "attribute"}, {"api_name": "models.city", "line_number": 92, "usage_type": "name"}, {"api_name": "models.city.updated_at.strftime", "line_number": 93, "usage_type": "call"}, {"api_name": "models.city.updated_at", "line_number": 93, "usage_type": "attribute"}, {"api_name": "models.city", "line_number": 93, "usage_type": "name"}, {"api_name": "models.city", "line_number": 97, "usage_type": "name"}, {"api_name": "models.city.id", "line_number": 98, "usage_type": "attribute"}, {"api_name": "models.city", "line_number": 98, "usage_type": "name"}, {"api_name": "models.city.__dict__", "line_number": 98, "usage_type": "attribute"}, {"api_name": "models.city", "line_number": 99, "usage_type": "argument"}]} {"seq_id": "354086442", "text": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations under\n# the License.\n# ==============================================================================\n\"\"\"Test IOTensor\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport math\nimport numpy\nimport pandas\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\nimport tensorflow as tf\nif not (hasattr(tf, \"version\") and tf.version.VERSION.startswith(\"2.\")):\n tf.compat.v1.enable_eager_execution()\nfrom tensorflow.keras.models import Sequential # pylint: disable=wrong-import-position\nfrom tensorflow.keras.layers import Dense # pylint: disable=wrong-import-position\nfrom tensorflow.keras.layers import LSTM # pylint: disable=wrong-import-position\n\nimport tensorflow_io as tfio # pylint: disable=wrong-import-position\n\n\ndef test_window():\n \"\"\"test_window\"\"\"\n value = [[e] for e in range(100)]\n value = tfio.IOTensor.from_tensor(tf.constant(value))\n value = value.window(3)\n expected_value = [[e, e+1, e+2] for e in range(98)]\n assert numpy.all(value.to_tensor().numpy() == expected_value)\n\n v = tfio.IOTensor.from_tensor(tf.constant([1, 2, 3, 4, 5]))\n v = v.window(3)\n assert numpy.all(v.to_tensor().numpy() == [[1, 2, 3], [2, 3, 4], [3, 4, 5]])\n\ndef test_window_to_dataset():\n \"\"\"test_window_to_dataset\"\"\"\n value = [[e] for e in range(100)]\n value = tfio.IOTensor.from_tensor(tf.constant(value))\n value = value.window(3)\n expected_value = [[e, e+1, e+2] for e in range(98)]\n dataset = value.to_dataset()\n dataset_value = [d.numpy().tolist() for d in dataset]\n assert numpy.all(dataset_value == expected_value)\n\ndef test_io_tensor_from_tensor_with_sklearn():\n \"\"\"test_io_tensor_from_tensor_with_sklearn\"\"\"\n\n # The test example is based on:\n # https://machinelearningmastery.com/time-series-prediction-lstm-recurrent-neural-networks-python-keras/\n #\n # Both IOTensor and pandas/sklearn are used, to show the usage of IOTensor.\n airline_passengers_csv = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"test_csv\", \"airline-passengers.csv\")\n\n dataframe = pandas.read_csv(\n airline_passengers_csv, usecols=[1], engine='python')\n\n numpy.random.seed(7)\n\n dataset = dataframe.values\n dataset = dataset.astype('float32')\n\n scaler = MinMaxScaler(feature_range=(0, 1))\n dataset = scaler.fit_transform(dataset)\n\n # split into train and test sets\n train_size = int(len(dataset) * 0.67)\n # test_size = len(dataset) - train_size\n train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]\n print(len(train), len(test))\n\n # convert an array of values into a dataset matrix\n def create_dataset(dataset, look_back=1):\n data_x, data_y = [], []\n for i in range(len(dataset)-look_back-1):\n a = dataset[i:(i+look_back), 0]\n data_x.append(a)\n data_y.append(dataset[i + look_back, 0])\n return numpy.array(data_x), numpy.array(data_y)\n\n # reshape into X=t and Y=t+1\n look_back = 1\n train_x, train_y = create_dataset(train, look_back)\n test_x, test_y = create_dataset(test, look_back)\n\n # reshape input to be [samples, time steps, features]\n train_x = numpy.reshape(train_x, (train_x.shape[0], 1, train_x.shape[1]))\n test_x = numpy.reshape(test_x, (test_x.shape[0], 1, test_x.shape[1]))\n\n # create and fit the LSTM network\n model = Sequential()\n model.add(LSTM(4, input_shape=(1, look_back)))\n model.add(Dense(1))\n 
model.compile(loss='mean_squared_error', optimizer='adam')\n\n #model.fit(train_x, train_y, epochs=100, batch_size=1, verbose=2)\n\n #########################################\n ############ IOTensor Match: ############\n train_size = int(len(dataset) * 0.67)\n #test_size = len(dataset) - train_size\n dataset = dataframe.values\n dataset = dataset.astype('float32')\n\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaler.fit(tfio.IOTensor.from_tensor(dataset))\n\n train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]\n\n train_dataset = tfio.IOTensor.from_tensor(train)\n\n train_dataset = tfio.IOTensor.from_tensor(scaler.transform(train_dataset))\n\n train_dataset = train_dataset.window(look_back + 1)\n train_dataset = train_dataset.to_dataset()\n train_dataset = train_dataset.map(lambda e: tf.split(e, [look_back, 1]))\n train_dataset = train_dataset.map(lambda x, y: (tf.reshape(x, [1, look_back]), y))\n print(\"train_dataset: \", train_dataset)\n\n test_dataset = tfio.IOTensor.from_tensor(test)\n\n test_dataset = tfio.IOTensor.from_tensor(scaler.transform(test_dataset))\n\n test_dataset = test_dataset.window(look_back + 1)\n test_dataset = test_dataset.to_dataset()\n test_dataset = test_dataset.map(lambda e: tf.split(e, [look_back, 1]))\n test_dataset = test_dataset.map(lambda x, y: (tf.reshape(x, [1, look_back]), y))\n\n model.fit(train_dataset.batch(1), epochs=100, verbose=2)\n #########################################\n\n # make predictions\n train_predict = model.predict(train_x)\n test_predict = model.predict(test_x)\n\n #########################################\n ############ IOTensor Match: ############\n train_x_dataset = train_dataset.map(lambda x, y: x)\n test_x_dataset = test_dataset.map(lambda x, y: x)\n\n train_x_dataset = train_x_dataset.batch(1)\n test_x_dataset = test_x_dataset.batch(1)\n\n train_predict_dataset = model.predict(train_x_dataset)\n test_predict_dataset = model.predict(test_x_dataset)\n\n train_predict_dataset_value = [d.tolist() for d in train_predict_dataset]\n test_predict_dataset_value = [d.tolist() for d in test_predict_dataset]\n\n # Note: train_predict_dataset_value and test_predict_dataset_value\n # have one extra compared with original implementation.\n train_predict_dataset_value = train_predict_dataset_value[:-1]\n test_predict_dataset_value = test_predict_dataset_value[:-1]\n assert numpy.allclose(train_predict_dataset_value, train_predict.tolist())\n assert numpy.allclose(test_predict_dataset_value, test_predict.tolist())\n #########################################\n\n # invert predictions\n train_predict = scaler.inverse_transform(train_predict)\n train_y = scaler.inverse_transform([train_y])\n test_predict = scaler.inverse_transform(test_predict)\n test_y = scaler.inverse_transform([test_y])\n # calculate root mean squared error\n train_score = math.sqrt(mean_squared_error(train_y[0], train_predict[:, 0]))\n print('Train Score: %.2f RMSE' % (train_score))\n test_score = math.sqrt(mean_squared_error(test_y[0], test_predict[:, 0]))\n print('Test Score: %.2f RMSE' % (test_score))\n\n # shift train predictions for plotting\n train_predict_plot = numpy.empty_like(dataset)\n train_predict_plot[:, :] = numpy.nan\n train_predict_plot[look_back:len(train_predict)+look_back, :] = train_predict\n # shift test predictions for plotting\n test_predict_plot = numpy.empty_like(dataset)\n test_predict_plot[:, :] = numpy.nan\n test_predict_plot[\n len(train_predict)+(look_back*2)+1:len(dataset)-1, :] = test_predict\n\n # plot baseline and 
predictions\n #plt.plot(scaler.inverse_transform(dataset))\n #plt.plot(train_predict_plot)\n #plt.plot(test_predict_plot)\n #plt.show()\n", "sub_path": "tests/test_io_tensor_eager.py", "file_name": "test_io_tensor_eager.py", "file_ext": "py", "file_size_in_byte": 7680, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "tensorflow.version.VERSION.startswith", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.version", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.enable_eager_execution", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow_io.IOTensor.from_tensor", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow_io.IOTensor", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow_io.IOTensor.from_tensor", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow_io.IOTensor", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow_io.IOTensor.from_tensor", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow_io.IOTensor", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 73, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 106, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 108, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow_io.IOTensor.from_tensor", "line_number": 121, "usage_type": "call"}, {"api_name": "tensorflow_io.IOTensor", "line_number": 121, "usage_type": "attribute"}, {"api_name": "tensorflow_io.IOTensor.from_tensor", "line_number": 125, "usage_type": "call"}, {"api_name": "tensorflow_io.IOTensor", "line_number": 125, "usage_type": "attribute"}, {"api_name": "tensorflow_io.IOTensor.from_tensor", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow_io.IOTensor", "line_number": 127, "usage_type": "attribute"}, {"api_name": "tensorflow.split", 
"line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 132, "usage_type": "call"}, {"api_name": "tensorflow_io.IOTensor.from_tensor", "line_number": 135, "usage_type": "call"}, {"api_name": "tensorflow_io.IOTensor", "line_number": 135, "usage_type": "attribute"}, {"api_name": "tensorflow_io.IOTensor.from_tensor", "line_number": 137, "usage_type": "call"}, {"api_name": "tensorflow_io.IOTensor", "line_number": 137, "usage_type": "attribute"}, {"api_name": "tensorflow.split", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 170, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 179, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 179, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 181, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 186, "usage_type": "attribute"}, {"api_name": "numpy.empty_like", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 190, "usage_type": "attribute"}]} {"seq_id": "524933136", "text": "from PyQt5 import QtWidgets\n\n\nclass Settings(QtWidgets.QWidget):\n def __init__(self, parent=None):\n QtWidgets.QWidget.__init__(self, parent)\n self.initUI()\n\n def initUI(self):\n\n vbox = QtWidgets.QVBoxLayout()\n\n\n wrap_vbox = QtWidgets.QVBoxLayout()\n lbl = QtWidgets.QLabel('test lbl for db2')\n wrap_vbox.addWidget(lbl)\n\n\n\n\n # ะ’ะพะทะฒั€ะฐั‰ะฐะตะผ ะฒ ะพัะฝะพะฒะฝะพะน ะผะฐะบะตั‚\n self.out_window = wrap_vbox\n\n\n\n", "sub_path": "Modules/Dialect/UI_window/db2.py", "file_name": "db2.py", "file_ext": "py", "file_size_in_byte": 471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 4, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 4, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget.__init__", "line_number": 6, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 6, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 6, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 11, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 11, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 14, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 14, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 15, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 15, "usage_type": "name"}]} {"seq_id": "434986838", "text": "from sqlalchemy import create_engine, MetaData, Table\nimport json\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.style.use('ggplot')\n\ndef get_data(db_url, table_name='exp_table', data_column_name='datastring', statuses=[3,4,5,7], exclude = []):\n # boilerplace sqlalchemy setup\n engine = create_engine(db_url)\n metadata = MetaData()\n metadata.bind = engine\n table = Table(table_name, metadata, autoload=True)\n # make a query and loop through\n s = table.select()\n 
rows = s.execute()\n\n    data = []\n\n    for row in rows:\n        # only use subjects who completed experiment and aren't excluded\n        if row['status'] in statuses and row['uniqueid'] not in exclude:\n            data.append(row[data_column_name])\n\n    # Now we have all participant datastrings in a list.\n    # Let's make it a bit easier to work with:\n\n    # parse each participant's datastring as json object\n    # and take the 'data' sub-object\n    data = [json.loads(part)['data'] for part in data]\n\n    # insert uniqueid field into trialdata in case it wasn't added\n    # in experiment:\n    for part in data:\n        for record in part:\n            record['trialdata']['uniqueid'] = record['uniqueid']\n\n    # flatten nested list so we just have a list of the trialdata recorded\n    # each time psiturk.recordTrialData(trialdata) was called.\n    data = [record['trialdata'] for part in data for record in part]\n\n    # Put all subjects' trial data into a dataframe object from the\n    # 'pandas' python library: one option among many for analysis\n    data_frame = pd.DataFrame(data)\n    \n    return data_frame\n\ndef get_learning_curve(data, block_size=30, plot=True):\n    data_len = len(data)\n    n_blocks = data_len//block_size  # integer division: n_blocks is used as an array size and a range bound\n    block_indices = np.linspace(0, data_len, num=n_blocks+1, dtype=int)\n    p_correct = np.zeros(n_blocks)\n    for block in range(n_blocks):\n        p_correct[block] = np.mean(data['hit'][block_indices[block]:block_indices[block+1]])\n    if plot:\n        pc, = plt.plot(range(1, n_blocks+1), p_correct)\n        plt.xlabel(\"block\", fontsize=30)\n        plt.ylabel(\"% correct\", fontsize=30)\n        plt.ylim(0, 1)\n        plt.xticks(range(1, n_blocks+1))\n    return p_correct\n\ndef get_avg_samps(data, block_size=30, plot=True, att_feature='locSamps'):\n    data_len = len(data)\n    n_blocks = data_len//block_size  # integer division, as above\n    block_indices = np.linspace(0, data_len, num=n_blocks+1, dtype=int)\n    att_p = np.zeros(n_blocks)\n    for block in range(n_blocks):\n        att_p[block] = np.mean(data[att_feature][block_indices[block]:block_indices[block+1]])\n    if plot:\n        pc, = plt.plot(range(1, n_blocks+1), att_p)\n        plt.xlabel(\"block\", fontsize=30)\n        plt.ylabel(\"Avg. 
# of samples\", fontsize=30)\n plt.ylim(0, 10)\n plt.xticks(range(1, n_blocks+1))\n return att_p\n\ndef get_noise_estimates(data, plot=True):\n\tcolor_noise = data[data['colorNoise'].notnull()]['colorNoise']\n\tcolor_noise = color_noise[color_noise.keys()[0]]\n\tloc_noise = data[data['colorNoise'].notnull()]['locNoise']\n\tloc_noise = loc_noise[loc_noise.keys()[0]]\n\tif plot:\n\t\tcolor, = plt.plot(color_noise, label=\"Color\")\n\t\tloc, = plt.plot(loc_noise, label=\"Location\")\n\t\tplt.xlabel(\"# samples\", fontsize=30)\n\t\tplt.ylabel(\"$\\sigma$\", fontsize=30)\n\t\tplt.ylim(0, 1)\n\t\tplt.legend(handles=[loc, color], loc=2, bbox_to_anchor=(1.05, 1))\n\treturn loc_noise, color_noise", "sub_path": "aa_utils/aa_analysis.py", "file_name": "aa_analysis.py", "file_ext": "py", "file_size_in_byte": 3411, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "matplotlib.style.use", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.style", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sqlalchemy.create_engine", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.MetaData", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 14, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 85, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}]} {"seq_id": "594548963", "text": "from flask import (\n current_app as app,\n url_for,\n render_template,\n redirect,\n request\n)\nfrom .forms import CalcularForm\nfrom .calculos import calculo_juros_compostos\nfrom .plotly_wrapper import create_plot_calculos\nimport pandas as pd\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef calcular():\n calcularForm = CalcularForm()\n \n if calcularForm.validate_on_submit():\n montante_inicial = calcularForm.montante_inicial.data\n aportes_mensais = calcularForm.aportes_mensais.data\n taxa_mensal = calcularForm.taxa_mensal.data\n tempo_meses = calcularForm.tempo_meses.data\n\n return redirect(url_for('resultado', montante=montante_inicial,aportes=aportes_mensais, taxa=taxa_mensal, tempo=tempo_meses))\n return render_template(\n 'index.html',\n form=calcularForm\n )\n\n@app.route(\"/resultado\")\n\ndef resultado():\n capital_mensal = calculo_juros_compostos(\n montante_inicial=float(request.args['montante']), \n aportes_mensais=float(request.args['aportes']),\n taxa_mensal=float(request.args['taxa']),\n tempo_meses=int(request.args['tempo'])\n )\n plot = create_plot_calculos(round(capital_mensal, 2))\n return render_template(\n \"resultado.html\",\n montante_inicial=round(float(request.args['montante']), 2),\n capital_final=round(capital_mensal['Capital'].iloc[-1], 2), \n juros_acumulados=round(capital_mensal['Juros Acumulados'].iloc[-1], 2), \n total_aportado=round(capital_mensal['Valor Aportado'].iloc[-1], 2),\n plot=plot\n )\n\n@app.route(\"/favicon.ico\")\ndef favicon():\n return app.send_static_file('favicon.ico')", "sub_path": "application/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 1665, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "forms.CalcularForm", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.current_app.route", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.current_app", "line_number": 13, "usage_type": "name"}, {"api_name": "calculos.calculo_juros_compostos", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": 
"flask.request.args", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "plotly_wrapper.create_plot_calculos", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.current_app.route", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.current_app", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.current_app.send_static_file", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.current_app", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.current_app.route", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.current_app", "line_number": 48, "usage_type": "name"}]} {"seq_id": "523121702", "text": "import redis\n\nclass RedisClient(object):\n \"\"\"\n Redisๅฎขๆˆท็ซฏ\n \"\"\"\n _CLIENT = None\n\n @classmethod\n def get_client(cls):\n \"\"\"\n ่Žทๅ–ๅฎขๆˆท็ซฏ\n :return:\n \"\"\"\n if cls._CLIENT:\n return cls._CLIENT\n\n pool = redis.ConnectionPool(host='localhost', port=6379, db=0)\n\n cls._CLIENT = redis.StrictRedis(connection_pool=pool)\n return cls._CLIENT\n", "sub_path": "redis_client.py", "file_name": "redis_client.py", "file_ext": "py", "file_size_in_byte": 420, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "redis.ConnectionPool", "line_number": 18, "usage_type": "call"}, {"api_name": "redis.StrictRedis", "line_number": 20, "usage_type": "call"}]} {"seq_id": "631706369", "text": "import unittest, sys\nsys.path.extend(['.','..','../..','py'])\n\nimport h2o, h2o_cmd, h2o_import as h2i, h2o_browse as h2b\nfrom h2o_test import find_file, dump_json, verboseprint\n\nclass Basic(unittest.TestCase):\n def tearDown(self):\n h2o.check_sandbox_for_errors()\n\n @classmethod\n def setUpClass(cls):\n h2o.init()\n\n @classmethod\n def tearDownClass(cls):\n h2o.tear_down_cloud()\n\n def test_simple2(self):\n # h2o-dev doesn't take ../.. type paths? 
make find_file return absolute path\n a_node = h2o.nodes[0]\n\n # import_result = a_node.import_files(path=find_file(\"smalldata/logreg/prostate.csv\"))\n import_result = a_node.import_files(path=find_file(\"smalldata/poker/poker-hand-testing.data\"))\n # print dump_json(import_result)\n\n k = import_result['keys'][0]\n # frames_result = a_node.frames(key=k[0], len=5)\n\n frames_result = a_node.frames(key=k)\n\n frame = frames_result['frames'][0]\n byteSize = frame['byteSize']\n rows = frame['rows']\n columns = frame['columns']\n for c in columns:\n label = c['label']\n missing = c['missing']\n stype = c['type']\n zeros = c['zeros']\n domain = c['domain']\n\n # print dump_json(frame)\n\n # how do you parse multiple files\n parse_result = a_node.parse(key=k)\n\n frame = parse_result['frames'][0]\n hex_key = frame['key']['name']\n\n verboseprint(hex_key, \":\", dump_json(parse_result))\n\nif __name__ == '__main__':\n h2o.unit_main()\n", "sub_path": "py2/testdir_single_jvm/test_simple2.py", "file_name": "test_simple2.py", "file_ext": "py", "file_size_in_byte": 1578, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "sys.path.extend", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 7, "usage_type": "attribute"}, {"api_name": "h2o.check_sandbox_for_errors", "line_number": 9, "usage_type": "call"}, {"api_name": "h2o.init", "line_number": 13, "usage_type": "call"}, {"api_name": "h2o.tear_down_cloud", "line_number": 17, "usage_type": "call"}, {"api_name": "h2o.nodes", "line_number": 21, "usage_type": "attribute"}, {"api_name": "h2o_test.find_file", "line_number": 24, "usage_type": "call"}, {"api_name": "h2o_test.verboseprint", "line_number": 51, "usage_type": "call"}, {"api_name": "h2o_test.dump_json", "line_number": 51, "usage_type": "call"}, {"api_name": "h2o.unit_main", "line_number": 54, "usage_type": "call"}]} {"seq_id": "584408423", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import Blueprint, redirect, url_for, render_template, request\nfrom auth import admin_required\nfrom apps.tenancy.admin.forms import TenancyInfoForm, TenancyForm\nfrom apps.tenancy.models import TenancyInfo, Tenancy\nfrom google.appengine.ext import blobstore\nfrom apps.utils.blobstore import get_uploads\nimport os\nfrom apps.file.models import File\n\nmod = Blueprint(\n 'tenancy.admin',\n __name__,\n template_folder='templates',\n url_prefix='/admin/services/tenancy'\n)\n\n@mod.route('/', methods=['GET', 'POST'], endpoint='index')\n@admin_required\ndef index():\n tenancy_info = TenancyInfo.get_master_db()\n form = TenancyInfoForm(obj=tenancy_info)\n if request.method == 'POST' and form.validate_on_submit():\n form.populate_obj(tenancy_info)\n tenancy_info.put()\n return redirect(url_for('tenancy.admin.index'))\n return render_template(\n 'tenancy/admin/index.html',\n index_active='active',\n form=form\n )\n\n@mod.route('/tenancy/', methods=['GET'])\n@admin_required\ndef tenancy():\n tenancies = Tenancy.query()\n return render_template(\n 'tenancy/admin/tenancy.html',\n tenancy_active='active',\n tenancies=tenancies\n )\n\n@mod.route('/tenancy/add/', methods=['GET', 'POST'], endpoint='add')\n@admin_required\ndef tenancy_add():\n form = TenancyForm()\n url = blobstore.create_upload_url(\n url_for('tenancy.admin.add')\n )\n if request.method == 'POST':\n upload_files = get_uploads(request, 'attach_file')\n file_ = None\n if 
len(upload_files):\n blob_info = upload_files[0]\n else:\n blob_info = None\n\n if form.validate_on_submit():\n if blob_info:\n blob_info = blobstore.BlobInfo.get(blob_info.key())\n if blob_info.size:\n file_ = File.create(\n blob_key=blob_info.key(),\n title=form.name.data,\n description=form.description.data,\n is_public=form.is_public.data,\n filename=os.path.basename(blob_info.filename.replace('\\\\', '/')),\n size=blob_info.size,\n content_type=blob_info.content_type)\n file_.put()\n else:\n blob_info.delete()\n\n tenancy = Tenancy()\n form.populate_obj(tenancy)\n if file_:\n tenancy.file = file_.key\n tenancy.put()\n return redirect(url_for('tenancy.admin.tenancy'))\n\n tenancies = Tenancy.query()\n return render_template(\n 'tenancy/admin/add.html',\n add_active='active',\n form=form,\n tenancies=tenancies,\n url=url\n )\n\n\n@mod.route('/tenancy//', methods=['GET', 'POST'], endpoint='edit')\n@admin_required\ndef tenancy_edit(key_id):\n tenancy = Tenancy.retrieve_by_id(key_id)\n if not tenancy:\n return redirect(url_for('tenancy.admin.tenancy'))\n if request.method == 'POST' and 'delete_tenancy' in request.form:\n tenancy.key.delete()\n return redirect(url_for('tenancy.admin.tenancy'))\n url = blobstore.create_upload_url(\n url_for('tenancy.admin.edit', key_id=key_id)\n )\n form = TenancyForm(obj=tenancy)\n if request.method == 'POST':\n upload_files = get_uploads(request, 'attach_file')\n file_ = None\n if len(upload_files):\n blob_info = upload_files[0]\n else:\n blob_info = None\n\n if form.validate_on_submit():\n if blob_info:\n blob_info = blobstore.BlobInfo.get(blob_info.key())\n if blob_info.size:\n file_ = File.create(\n blob_key=blob_info.key(),\n title=form.name.data,\n description=form.description.data,\n is_public=form.is_public.data,\n filename=os.path.basename(blob_info.filename.replace('\\\\', '/')),\n size=blob_info.size,\n content_type=blob_info.content_type)\n file_.put()\n else:\n blob_info.delete()\n\n form.populate_obj(tenancy)\n if file_:\n if tenancy.file:\n tenancy.file.delete()\n tenancy.file = file_.key\n tenancy.put()\n return redirect(url_for('tenancy.admin.tenancy'))\n\n return render_template(\n 'tenancy/admin/tenancy_edit.html',\n tenancy=tenancy,\n form=form,\n url=url\n )\n", "sub_path": "apps/tenancy/admin/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4620, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "flask.Blueprint", "line_number": 12, "usage_type": "call"}, {"api_name": "apps.tenancy.models.TenancyInfo.get_master_db", "line_number": 22, "usage_type": "call"}, {"api_name": "apps.tenancy.models.TenancyInfo", "line_number": 22, "usage_type": "name"}, {"api_name": "apps.tenancy.admin.forms.TenancyInfoForm", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 28, "usage_type": "call"}, {"api_name": "auth.admin_required", "line_number": 20, "usage_type": "name"}, {"api_name": "apps.tenancy.models.Tenancy.query", "line_number": 37, "usage_type": "call"}, {"api_name": "apps.tenancy.models.Tenancy", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 38, "usage_type": 
"call"}, {"api_name": "auth.admin_required", "line_number": 35, "usage_type": "name"}, {"api_name": "apps.tenancy.admin.forms.TenancyForm", "line_number": 47, "usage_type": "call"}, {"api_name": "google.appengine.ext.blobstore.create_upload_url", "line_number": 48, "usage_type": "call"}, {"api_name": "google.appengine.ext.blobstore", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "apps.utils.blobstore.get_uploads", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "argument"}, {"api_name": "google.appengine.ext.blobstore.BlobInfo.get", "line_number": 61, "usage_type": "call"}, {"api_name": "google.appengine.ext.blobstore.BlobInfo", "line_number": 61, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.blobstore", "line_number": 61, "usage_type": "name"}, {"api_name": "apps.file.models.File.create", "line_number": 63, "usage_type": "call"}, {"api_name": "apps.file.models.File", "line_number": 63, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "apps.tenancy.models.Tenancy", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 80, "usage_type": "call"}, {"api_name": "apps.tenancy.models.Tenancy.query", "line_number": 82, "usage_type": "call"}, {"api_name": "apps.tenancy.models.Tenancy", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 83, "usage_type": "call"}, {"api_name": "auth.admin_required", "line_number": 45, "usage_type": "name"}, {"api_name": "apps.tenancy.models.Tenancy.retrieve_by_id", "line_number": 95, "usage_type": "call"}, {"api_name": "apps.tenancy.models.Tenancy", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 100, "usage_type": "call"}, {"api_name": "google.appengine.ext.blobstore.create_upload_url", "line_number": 101, "usage_type": "call"}, {"api_name": "google.appengine.ext.blobstore", "line_number": 101, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 102, "usage_type": "call"}, {"api_name": "apps.tenancy.admin.forms.TenancyForm", "line_number": 104, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 105, "usage_type": "name"}, {"api_name": "apps.utils.blobstore.get_uploads", "line_number": 106, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 106, "usage_type": "argument"}, {"api_name": "google.appengine.ext.blobstore.BlobInfo.get", "line_number": 115, "usage_type": "call"}, {"api_name": "google.appengine.ext.blobstore.BlobInfo", "line_number": 115, "usage_type": 
"attribute"}, {"api_name": "google.appengine.ext.blobstore", "line_number": 115, "usage_type": "name"}, {"api_name": "apps.file.models.File.create", "line_number": 117, "usage_type": "call"}, {"api_name": "apps.file.models.File", "line_number": 117, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 137, "usage_type": "call"}, {"api_name": "auth.admin_required", "line_number": 93, "usage_type": "name"}]} {"seq_id": "280933737", "text": "# -*- coding: utf-8 -*-\nfrom openprocurement.api.utils.common import get_now\n\nfrom datetime import timedelta\nfrom copy import deepcopy\n\n\ntest_organization = {\n \"name\": u\"ะ”ะตั€ะถะฐะฒะฝะต ัƒะฟั€ะฐะฒะปั–ะฝะฝั ัะฟั€ะฐะฒะฐะผะธ\",\n \"identifier\": {\n \"scheme\": u\"UA-EDR\",\n \"id\": u\"00037256\",\n \"uri\": u\"http://www.dus.gov.ua/\"\n },\n \"address\": {\n \"countryName\": u\"ะฃะบั€ะฐั—ะฝะฐ\",\n \"postalCode\": u\"01220\",\n \"region\": u\"ะผ. ะšะธั—ะฒ\",\n \"locality\": u\"ะผ. ะšะธั—ะฒ\",\n \"streetAddress\": u\"ะฒัƒะป. ะ‘ะฐะฝะบะพะฒะฐ, 11, ะบะพั€ะฟัƒั 1\"\n },\n \"contactPoint\": {\n \"name\": u\"ะ”ะตั€ะถะฐะฒะฝะต ัƒะฟั€ะฐะฒะปั–ะฝะฝั ัะฟั€ะฐะฒะฐะผะธ\",\n \"telephone\": u\"0440000000\"\n }\n}\ntest_procuringEntity = test_organization.copy()\nswiftsure_procuring_entity = deepcopy(test_procuringEntity)\nswiftsure_procuring_entity.update({\n \"additionalContactPoints\": [\n {\n \"name\": u\"ะ”ะตั€ะถะฐะฒะฝะต ัƒะฟั€ะฐะฒะปั–ะฝะฝั ัะฟั€ะฐะฒะฐะผะธ\",\n \"telephone\": u\"0440000000\"\n }\n ]\n})\n\ncontract_create_data = {\n \"awardID\": \"376d560b2b2d452a80543865f3cab43e\",\n \"contractID\": \"a930574bf8cd405cb7f9c9ed4ca68061\",\n \"contractType\": \"ceasefire\",\n \"dateSigned\": get_now().isoformat(),\n \"merchandisingObject\": \"a930574bf8cd999cb7f9c9ed4ca68061\",\n \"period\": {\n \"startDate\": get_now().isoformat(),\n \"endDate\": (get_now() + timedelta(days=30)).isoformat(),\n },\n \"procuringEntity\": swiftsure_procuring_entity,\n \"title\": \"Test Contract\",\n \"suppliers\": [test_organization],\n \"value\": {\n \"currency\": \"UAH\",\n \"amount\": 500.0,\n \"valueAddedTaxIncluded\": True\n },\n \"items\": [\n {\n \"description\": u\"ะ—ะตะผะปั ะดะปั ะฒั–ะนััŒะบะพะฒะพัะปัƒะถะฑะพะฒั†ั–ะฒ\",\n \"classification\": {\n \"scheme\": u\"CPV\",\n \"id\": u\"66113000-5\",\n \"description\": u\"ะ—ะตะผะตะปัŒะฝั– ะดั–ะปัะฝะบะธ\"\n },\n \"unit\": {\n \"name\": u\"item\",\n \"code\": u\"44617100-9\"\n },\n \"quantity\": 5,\n \"registrationDetails\": {\n \"status\": \"unknown\",\n },\n \"address\": {\n \"countryName\": u\"ะฃะบั€ะฐั—ะฝะฐ\",\n \"postalCode\": \"79000\",\n \"region\": u\"ะผ. ะšะธั—ะฒ\",\n \"locality\": u\"ะผ. ะšะธั—ะฒ\",\n \"streetAddress\": u\"ะฒัƒะป. 
ะ‘ะฐะฝะบะพะฒะฐ 1\"\n }\n }\n ]\n}\n", "sub_path": "openprocurement/contracting/ceasefire/tests/fixtures/data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 2497, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "copy.deepcopy", "line_number": 28, "usage_type": "call"}, {"api_name": "openprocurement.api.utils.common.get_now", "line_number": 42, "usage_type": "call"}, {"api_name": "openprocurement.api.utils.common.get_now", "line_number": 45, "usage_type": "call"}, {"api_name": "openprocurement.api.utils.common.get_now", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 46, "usage_type": "call"}]} {"seq_id": "443128830", "text": "import argparse\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n\nfrom model import CapsulesNet\nfrom data import Data\n\n\ndef one_hot_encode(target, length):\n batch_size = target.size(0)\n one_hot_vec = torch.zeros((batch_size, length))\n for i in range(batch_size):\n one_hot_vec[i, target[i]] = 1.0\n\n return one_hot_vec\n\n\ndef main(args):\n # Set logger\n writer = SummaryWriter(log_dir=args.log_dir)\n\n # DATA\n data = Data(args)\n # Embed and visualize\n data.embed(writer)\n # Load data loader\n train_loader, test_loader = data.load()\n n_train_batches = len(train_loader)\n n_test_batches = len(test_loader)\n\n # MODEL\n model = CapsulesNet(args.n_conv_in_channel, args.n_conv_out_channel,\n args.n_primary_unit, args.primary_unit_size,\n args.n_classes, args.output_unit_size,\n args.n_routing, args.regularization_scale,\n args.input_width, args.input_height,\n args.use_cuda)\n\n if args.use_cuda:\n print('[INFO] Using {} GPU(s)'.format(torch.cuda.device_count()))\n model.cuda()\n\n # Info\n print(model)\n\n # Optimizer\n optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)\n\n # TRAINING\n # Train helper\n def train(epoch, model):\n # Switch model to train mode\n model.train()\n\n for i, (data, target) in enumerate(tqdm(train_loader, unit='batch')):\n # One-hot encode for labels\n target_one_hot = one_hot_encode(target, args.n_classes)\n\n # Wrap inputs into Variable\n data, target = Variable(data), Variable(target_one_hot)\n if args.use_cuda:\n data = data.cuda()\n target = target.cuda()\n\n # Forward\n optimizer.zero_grad()\n output = model(data)\n\n # Calculate loss\n loss, margin_loss, recon_loss = model.loss(data, output, target)\n\n # Backward\n loss.backward()\n optimizer.step()\n\n # Tensorboard log\n global_step = (epoch-1) * n_train_batches + i\n writer.add_scalar('train/total_loss', loss.data[0], global_step)\n writer.add_scalar('train/margin_loss' , margin_loss.data[0], global_step)\n writer.add_scalar('train/reconstruction_loss', recon_loss.data[0], global_step)\n\n # STDOUT log\n if (i+1) % args.log_interval == 0:\n template = \"[Epoch {}/{}] \" \\\n \"[Batch {}/{}] \" \\\n \"Total loss: {:.6f}, Margin loss: {:.6f}, Reconstruction loss: {:.6f}\"\n tqdm.write(template.format(epoch, args.epochs,\n i+1, n_train_batches,\n loss.data[0], margin_loss.data[0], recon_loss.data[0]))\n\n\n # Test helper\n def test(epoch, model):\n # Switch model to evaluate mode\n model.eval()\n\n loss, margin_loss, recon_loss = 0., 0., 0.\n correct = 0.\n\n for data, target in tqdm(test_loader):\n target_indices = target\n\n # One-hot encode for labels\n 
target_one_hot = one_hot_encode(target, args.n_classes)\n\n # Wrap inputs into Variable\n data, target = Variable(data, volatile=True), Variable(target_one_hot)\n if args.use_cuda:\n data = data.cuda()\n target = target.cuda()\n\n # Forward\n output = model(data)\n\n # Calculate loss, and sum up\n t_loss, m_loss, r_loss = model.loss(data, output, target,\n size_average=False)\n loss += t_loss.data[0]\n margin_loss += m_loss.data[0]\n recon_loss += r_loss.data[0]\n\n # Count number of correct prediction\n # v_magnitude shape: [batch_size, 10, 1, 1]\n v_magnitude = torch.sqrt((output**2).sum(dim=2, keepdim=True))\n # pred shape: [batch_size, 1, 1, 1]\n pred = v_magnitude.data.max(1, keepdim=True)[1].cpu()\n correct += pred.eq(target_indices.view_as(pred)).sum()\n\n # Tensorboard log\n loss /= n_test_batches\n margin_loss /= n_test_batches\n recon_loss /= n_test_batches\n accuracy = correct / len(test_loader.dataset)\n\n global_step = epoch * n_train_batches\n writer.add_scalar('test/total_loss', loss, global_step)\n writer.add_scalar('test/margin_loss', margin_loss, global_step)\n writer.add_scalar('test/recon_loss', recon_loss, global_step)\n writer.add_scalar('test/accuracy', accuracy, global_step)\n\n # IMAGES RECONSTRUCTION\n if epoch % args.recon_interval == 0:\n reconstruction = model.decoder(output, target)\n # Resize to batch of images [batch_size, C, H, W]\n recon_imgs = reconstruction.view(-1, args.n_conv_in_channel,\n args.input_height, args.input_width)\n # Visualize in Tensorboard\n recon_grid = vutils.make_grid(recon_imgs.data,\n normalize=True, scale_each=True)\n original_grid = vutils.make_grid(data.data,\n normalize=True, scale_each=True)\n writer.add_image('test/original_{}_{}'.format(epoch, global_step),\n original_grid, global_step)\n writer.add_image('test/reconstruction_{}_{}'.format(epoch, global_step),\n recon_grid, global_step)\n\n # STDOUT log\n template = \"\"\"[Test {}]\n Total loss: {:.6f}, Margin loss: {:.6f}, Reconstruction loss: {:.6f}\n Accuracy: {:.4f}%\n \"\"\"\n tqdm.write(template.format(epoch,\n loss, margin_loss, recon_loss,\n accuracy * 100))\n\n\n def checkpoint(epoch, parameters):\n \"\"\"Helper for saving model's parameters\n \"\"\"\n if not os.path.exists(args.model_dir):\n os.mkdir(args.model_dir)\n model_path = os.path.join(args.model_dir,\n 'epoch_{}.pth'.format(epoch))\n torch.save(parameters, model_path)\n print('[INFO] Checkpoint {} for epoch {}'.format(model_path, epoch))\n\n\n # Start training\n for epoch in range(1, args.epochs+1):\n train(epoch, model)\n test(epoch, model)\n\n if epoch % args.save_interval == 0:\n checkpoint(epoch, model.state_dict())\n\n writer.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--epochs', type=int, default=25)\n parser.add_argument('--learning_rate', type=float, default=0.01)\n parser.add_argument('--batch_size', type=int, default=128)\n\n parser.add_argument('--n_conv_in_channel', type=int, default=1)\n parser.add_argument('--n_conv_out_channel', type=int, default=256)\n parser.add_argument('--n_primary_unit', type=int, default=8)\n parser.add_argument('--primary_unit_size', type=int, default=1152)\n parser.add_argument('--n_classes', type=int, default=10)\n parser.add_argument('--output_unit_size', type=int, default=16)\n parser.add_argument('--n_routing', type=int, default=3)\n parser.add_argument('--regularization_scale', type=float, default=0.0005)\n parser.add_argument('--input_height', type=int, default=28)\n parser.add_argument('--input_width', 
type=int, default=28)\n\n parser.add_argument('--no_cuda', action='store_true', default=False)\n parser.add_argument('--log_interval', type=int, default=10)\n parser.add_argument('--save_interval', type=int, default=5)\n parser.add_argument('--recon_interval', type=int, default=5)\n\n parser.add_argument('--log_dir', type=str, default='logs')\n parser.add_argument('--model_dir', type=str, default='models')\n\n args = parser.parse_args()\n args.use_cuda = (not args.no_cuda) and torch.cuda.is_available()\n\n print(args)\n main(args)\n", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 8264, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "torch.zeros", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorboardX.SummaryWriter", "line_number": 27, "usage_type": "call"}, {"api_name": "data.Data", "line_number": 30, "usage_type": "call"}, {"api_name": "data.embed", "line_number": 32, "usage_type": "call"}, {"api_name": "data.load", "line_number": 34, "usage_type": "call"}, {"api_name": "model.CapsulesNet", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.cuda.device_count", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 47, "usage_type": "attribute"}, {"api_name": "model.cuda", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 54, "usage_type": "name"}, {"api_name": "model.parameters", "line_number": 54, "usage_type": "call"}, {"api_name": "model.train", "line_number": 60, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 67, "usage_type": "call"}, {"api_name": "data.cuda", "line_number": 69, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 77, "usage_type": "call"}, {"api_name": "tqdm.tqdm.write", "line_number": 94, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 94, "usage_type": "name"}, {"api_name": "model.eval", "line_number": 102, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 114, "usage_type": "call"}, {"api_name": "data.cuda", "line_number": 116, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 131, "usage_type": "call"}, {"api_name": "model.decoder", "line_number": 150, "usage_type": "call"}, {"api_name": "torchvision.utils.make_grid", "line_number": 155, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 155, "usage_type": "name"}, {"api_name": "torchvision.utils.make_grid", "line_number": 157, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 157, "usage_type": "name"}, {"api_name": "data.data", "line_number": 157, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm.write", "line_number": 169, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 169, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path", "line_number": 177, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": 
"torch.save", "line_number": 181, "usage_type": "call"}, {"api_name": "model.state_dict", "line_number": 191, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 223, "usage_type": "attribute"}]} {"seq_id": "225459998", "text": "#!/usr/bin/env python3\n\n__version__ = \"0.4.5\"\n__author__ = \"Jens Luebeck\"\n\nimport argparse\nimport copy\nfrom math import log\nimport operator\nimport sys\n\nfrom ac_util import *\nimport get_genes\n\ntot_min_del = 5000 # minimum size of deletion before non-trivial\nminCycleSize = 10000\ncompCycContCut = 50000\ncycCut = 0.12\ncompCut = 0.3\nmin_upper_cn = 4.5\ndecomposition_strictness = 0.1\n\n# bfb thresholds\nmin_score_for_bfb = 0.25\n# min_fb_reads_for_bfb = 10\nfb_dist_cut = 25000\n\n#graph properties\ngraph_cns = defaultdict(IntervalTree)\n\n# ------------------------------------------------------------\n# Methods to compute values used in classification\ndef get_size(cycle, segSeqD):\n return sum(segSeqD[abs(x)][2] - segSeqD[abs(x)][1] for x in cycle)\n\n\ndef weightedCycleAmount(cycle, cn, segSeqD):\n # get length of cycle\n sc_length = get_size(cycle, segSeqD) / 1000.\n return sc_length * cn\n\n\ndef get_diff(e1, e2, segSeqD):\n p1_abs = segSeqD[abs(e1)]\n p2_abs = segSeqD[abs(e2)]\n if e1 == 0 or e2 == 0:\n return 1\n\n p1_end = p1_abs[2] if e1 > 0 else p1_abs[1]\n p2_start = p2_abs[1] if e2 > 0 else p2_abs[2]\n return abs(p2_start - p1_end)\n\n\ndef isCircular(cycle):\n circular = False if cycle[0] == 0 and cycle[-1] == 0 else True\n return circular\n\n\ndef isRearranged(cycle, segSeqD):\n # check if it contains regions from multiple chroms\n chromList = [segSeqD[abs(ind)][0] for ind in cycle if ind != 0]\n if len(set(chromList)) > 1:\n return True\n\n max_del_size = 0\n for i in range(0, len(cycle) - 1):\n if cycle[i] == 0 or cycle[i + 1] == 0:\n continue\n if cycle[i] < 0 and cycle[i + 1] > 0 or cycle[i] > 0 and cycle[i + 1] < 0:\n return True\n\n dist_diff = get_diff(cycle[i], cycle[i + 1], segSeqD)\n max_del_size = max(dist_diff, max_del_size)\n # tot_del_size += dist_diff\n # if tot_del_size > tot_min_del:\n # print(\"Delsize\")\n # return True\n if max_del_size > tot_min_del:\n return True\n\n return False\n\n\ndef tot_rearr_edges(graphf, add_chr_tag):\n rearr_e = 0\n with open(graphf) as infile:\n for line in infile:\n if line.startswith(\"discordant\"):\n fields = line.rstrip().rsplit()\n lbp, rbp = fields[1].split(\"->\")\n lchrom, lpd = lbp.rsplit(\":\")\n rchrom, rpd = rbp.rsplit(\":\")\n if add_chr_tag and not lchrom.startswith(\"chr\"):\n lchrom = \"chr\" + lchrom\n rchrom = \"chr\" + rchrom\n\n lpos, ldir = int(lpd[:-1]), lpd[-1]\n rpos, rdir = int(rpd[:-1]), rpd[-1]\n if ldir == rdir:\n # if lchrom == rchrom and abs(rpos - lpos) < fb_dist_cut:\n rearr_e += 1\n\n elif abs(rpos - lpos) > fb_dist_cut:\n rearr_e += 1\n\n return rearr_e\n\n\ndef decompositionComplexity(graphf, cycleList, cycleCNs, segSeqD, feature_inds, exclude_inds, add_chr_tag):\n #construct intervaltree of valid regions\n hit_region_it = defaultdict(IntervalTree)\n for i in feature_inds:\n cycle = cycleList[i]\n for cv in cycle:\n if cv != 0:\n c, s, e = segSeqD[abs(cv)]\n hit_region_it[c].addi(s, e+1)\n\n hf_cut = 0.8\n totalGraphWeight = 0\n segs = 0\n with open(graphf) as infile:\n for line in infile:\n if line.startswith(\"sequence\"):\n fields = line.rsplit()\n c, s, e = 
fields[1].rsplit(\":\")[0], int(fields[1].rsplit(\":\")[1][:-1]), int(fields[2].rsplit(\":\")[1][:-1])+1\n if add_chr_tag and not c.startswith('chr'):\n c = \"chr\" + c\n\n if not hit_region_it[c][s:e]:\n continue\n\n cn = float(fields[3])\n size = float(fields[5]) / 1000.\n\n # if cn > 1:\n segs += 1\n totalGraphWeight += (size * cn)\n\n elif line.startswith(\"BreakpointEdge\"):\n break\n\n # cycleWeights = [0] * len(feature_inds)\n # for ind, cycle in enumerate(cycleList):\n cycleWeights = []\n new_feat_inds = set()\n for ind, cycle in enumerate(cycleList):\n if ind not in exclude_inds:\n hits = False\n for cv in cycle:\n if cv != 0:\n c, s, e = segSeqD[abs(cv)]\n if hit_region_it[c][s:e]:\n hits = True\n break\n if hits:\n wca = weightedCycleAmount(cycle, cycleCNs[ind], segSeqD)\n if ind in feature_inds:\n new_feat_inds.add(len(cycleWeights))\n\n cycleWeights.append(wca)\n\n\n # scW = sorted(cycleWeights, reverse=True)\n # cf = cycleWeights[0]/totalGraphWeight\n\n cf = 0\n fe_ent = 0\n added_cf = 0\n cInd = 0\n if totalGraphWeight > 0:\n while cf + added_cf < hf_cut and cInd < len(cycleWeights):\n if cInd in new_feat_inds:\n cf += added_cf\n if added_cf > 0:\n fe_ent += (added_cf * log(added_cf))\n\n added_cf = cycleWeights[cInd] / float(totalGraphWeight)\n\n cInd += 1\n\n cf+=added_cf\n cf = round(cf, 5)\n rf = (1 - cf)\n # print(rf, cf, totalGraphWeight)\n if rf > 0:\n fu_ent = -1 * rf * log(rf)\n else:\n fu_ent = 0\n\n else:\n print(\"Warning: total graph weight <= 0\")\n fu_ent = 0\n\n seg_ent = log(1.0 / segs) if segs > 0 else 0\n return fu_ent - fe_ent - seg_ent, fu_ent - fe_ent, -1 * seg_ent\n\n\n# Compute f (foldback fraction) from the edges in the AA graph alone\ndef compute_f_from_AA_graph(graphf, add_chr_tag):\n with open(graphf) as infile:\n fbCount, nonFbCount, fbEdges, maxCN = 0, 0, 0, 0\n for line in infile:\n fields = line.rstrip().rsplit()\n if line.startswith(\"discordant\"):\n lbp, rbp = fields[1].split(\"->\")\n lchrom, lpd = lbp.rsplit(\":\")\n rchrom, rpd = rbp.rsplit(\":\")\n if add_chr_tag and not lchrom.startswith('chr'):\n lchrom = \"chr\" + lchrom\n rchrom = \"chr\" + rchrom\n\n lpos, ldir = int(lpd[:-1]), lpd[-1]\n rpos, rdir = int(rpd[:-1]), rpd[-1]\n\n if lcD[lchrom][lpos] or lcD[rchrom][rpos]:\n continue\n\n elif fields[0] == \"discordant\" and rchrom == lchrom and abs(rpos - lpos) <= 2000 and rdir == '-' and ldir == '+':\n continue\n\n rSupp = int(fields[3])\n if ldir == rdir:\n if lchrom == rchrom and abs(rpos - lpos) < fb_dist_cut:\n fbCount += rSupp\n fbEdges += 1\n\n else:\n nonFbCount += rSupp\n\n else:\n nonFbCount += rSupp\n\n elif line.startswith(\"sequence\"):\n if not lcD[fields[1].rsplit(\":\")[0]].overlaps(int(fields[1].rsplit(\":\")[1][:-1]),\n int(fields[2].rsplit(\":\")[1][:-1])):\n\n ccn = float(fields[3])\n if ccn > maxCN:\n maxCN = ccn\n\n # just return 0 if there isn't enough support\n if fbEdges < 2:\n return 0, maxCN\n\n return fbCount / max(1.0, float(fbCount + nonFbCount)), maxCN\n\n\ndef nonbfb_cycles_are_ecdna(non_bfb_cycle_inds, cycleList, segSeqD, cycleCNs):\n for ind in non_bfb_cycle_inds:\n cycle = cycleList[ind]\n length = get_size(cycle, segSeqD)\n\n if length > 100000 and cycleCNs[ind] > 5:\n return True\n\n return False\n\n\n# proportion of cycles with foldbacks\ndef cycles_file_bfb_props(cycleList, segSeqD, cycleCNs, graphf, add_chr_tag):\n FB_breaks = 0.0\n distal_breaks = 0.0\n lin_breaks = 0.0\n\n bfb_weight = 0.0\n non_bfb_cycle_weight = 0.0\n tot_bfb_supp_cycles = 0\n\n non_bfb_cycle_inds = []\n bfb_cycle_inds = 
[]\n\n for ind, ocycle in enumerate(cycleList):\n cycle = copy.copy(ocycle)\n if cycle[0] != 0:\n cycle.append(cycle[0])\n\n hit = False\n isBFBelem = False\n illegalBFB = False\n for a, b in zip(cycle[:-1], cycle[1:]):\n # changes direction on same chrom\n diff = get_diff(a, b, segSeqD)\n aSize = get_size([a, ], segSeqD)\n bSize = get_size([b, ], segSeqD)\n if aSize < minCycleSize and bSize < minCycleSize:\n continue\n\n # check if front and back are connected via everted edge\n front_to_back_connection = amp_encompassed(cycle, segSeqD, graphf, add_chr_tag)\n if front_to_back_connection:\n # print(\"Cycle has front to back link\", cycle)\n illegalBFB = True\n\n else:\n if a * b < 0 and segSeqD[abs(a)][0] == segSeqD[abs(b)][0]:\n hit = True\n if diff < 50000:\n isBFBelem = True\n FB_breaks += cycleCNs[ind]\n\n else:\n distal_breaks += cycleCNs[ind]\n\n elif diff > tot_min_del:\n hit = True\n distal_breaks += cycleCNs[ind]\n\n if segSeqD[abs(a)][0] != segSeqD[abs(b)][0] and not (a == 0 or b == 0):\n illegalBFB = True\n\n if illegalBFB:\n isBFBelem = False\n\n if cycle[0] == 0 and not hit and get_size(cycle,segSeqD) > 10000:\n lin_breaks += cycleCNs[ind]\n\n if isBFBelem:\n tot_bfb_supp_cycles += 1\n bfb_weight += cycleCNs[ind]\n bfb_cycle_inds.append(ind)\n\n elif cycle[0] != 0 and get_size(cycle[:-1], segSeqD) > 30000:\n non_bfb_cycle_weight += cycleCNs[ind]\n non_bfb_cycle_inds.append(ind)\n\n hasEC = nonbfb_cycles_are_ecdna(non_bfb_cycle_inds, cycleList, segSeqD, cycleCNs)\n # if len(cycleList) >= 5:\n # minBFBCyclesRequired = 2\n # else:\n minBFBCyclesRequired = 2\n if FB_breaks > 1.5 and tot_bfb_supp_cycles >= minBFBCyclesRequired:\n tot = float(FB_breaks + distal_breaks + lin_breaks)\n return FB_breaks / tot, distal_breaks / tot, bfb_weight / (non_bfb_cycle_weight + bfb_weight), hasEC, \\\n non_bfb_cycle_inds, bfb_cycle_inds\n\n return 0, 0, 0, False, [], []\n\n\n# ------------------------------------------------------------\n# Classifications\ndef cycleIsNoAmpInvalid(cycle, cn, segSeqD, isSingleton, maxCN):\n # CN flow can be split across multiple amps\n if not isSingleton:\n scale = min(args.min_cn_flow, maxCN * decomposition_strictness)\n elif maxCN > 7:\n scale = min(3., maxCN / 8.)\n else:\n scale = 2.5\n\n # print(cycle)\n # print(\"decomp cutoff\",scale)\n\n if (cn <= scale) or (maxCN < min_upper_cn):\n return True\n\n length = get_size(cycle, segSeqD)\n return length < minCycleSize\n\n\ndef classifyConnections(cycleSet1, cycleSet2, clfs):\n cycleSet1, cycleSet2 = sorted([cycleSet1, cycleSet2], key=lambda x: len(x), reverse=True)\n csets = []\n resultDict = defaultdict(float)\n if not cycleSet2:\n for c1 in cycleSet1:\n csets.append(frozenset([clfs[c1], ]))\n\n else:\n for c1 in cycleSet1:\n for c2 in cycleSet2:\n csets.append(frozenset([clfs[c1], clfs[c2]]))\n\n distributed_edge_value = 1.0 / len(csets) if csets else 0\n for cset in csets:\n resultDict[cset] += distributed_edge_value\n\n return resultDict\n\n\n# categories = [\"No amp/Invalid\", \"Linear amplification\", \"Trivial cycle\", \"Complex non-cyclic\", \"Complex cyclic\"]\ndef classifyAmpliconProfile(amp_profile, rearr_e, totalCompCyclicCont, totCyclicCont, force=False):\n cycSig = amp_profile[\"Trivial cycle\"] + amp_profile[\"Complex cyclic\"]\n if (cycSig > cycCut or totalCompCyclicCont > compCycContCut) and totCyclicCont > 10000:\n return \"Cyclic\"\n\n elif amp_profile[\"Complex non-cyclic\"] + cycSig > compCut:\n if rearr_e < 2:\n return \"Linear amplification\"\n\n return \"Complex non-cyclic\"\n\n else:\n if 
max(amp_profile.values()) == 0:\n return \"No amp/Invalid\"\n\n elif amp_profile[\"No amp/Invalid\"] > 0:\n if amp_profile[\"Linear amplification\"] / float(amp_profile[\"No amp/Invalid\"]) > 0.25:\n if rearr_e >= 5:\n return \"Complex non-cyclic\"\n\n return \"Linear amplification\"\n\n if force:\n del amp_profile[\"No amp/Invalid\"]\n if cycSig > max(amp_profile.values()):\n return \"Cyclic\"\n\n maxCat = max(amp_profile.items(), key=operator.itemgetter(1))[0]\n return maxCat\n\n\ndef classifyBFB(fb, cyc_sig, nonbfb_sig, bfb_cyc_ratio, maxCN):\n if fb < min_score_for_bfb or cyc_sig < 0.295 or maxCN < 4:\n return None\n\n # dominated by non-classical BFB cycles\n elif nonbfb_sig > 0.5 and bfb_cyc_ratio < 0.6:\n return None\n\n # if bfb_cyc_ratio < 0.85:\n # # if nonbfb_sig > 0.55:\n # # return None\n #\n # if args.use_BFB_linked_cyclic_class:\n # return \"BFB-linked cyclic\"\n\n return \"BFB\"\n\n\n# ------------------------------------------------------------\n# structure metanalysis\n\ndef check_max_cn(ec_cycle_inds, cycleList, segSeqD):\n for e_ind in ec_cycle_inds:\n for c_id in cycleList[e_ind]:\n chrom, l, r = segSeqD[abs(c_id)]\n if r - l < 1000:\n continue\n\n for i in graph_cns[chrom][l:r]:\n if i.data > min_upper_cn:\n return True\n\n return False\n\n\ndef get_amount_sigamp(ec_cycle_inds, cycleList, segSeqD):\n used_content = defaultdict(set)\n for e_ind in ec_cycle_inds:\n for c_id in cycleList[e_ind]:\n chrom, l, r = segSeqD[abs(c_id)]\n if not chrom:\n continue\n seg_t = IntervalTree([Interval(l, r+1)])\n olapping_low_cns = [x for x in graph_cns[chrom][l:r] if x.data < 4]\n for x in olapping_low_cns:\n seg_t.chop(x.begin, x.end)\n for x in seg_t:\n used_content[chrom] |= set(range(x.begin, x.end))\n\n total_sigamp = 0\n for chrom, useset in used_content.items():\n total_sigamp += len(useset)\n\n return total_sigamp\n\n\ndef clusterECCycles(cycleList, cycleCNs, segSeqD, excludableCycleIndices=None):\n padding = 500000\n indices = [x for x in range(len(cycleList)) if cycleList[x][0] != 0 and x not in excludableCycleIndices]\n clusters = []\n seenSegs = set()\n total_EC_size = 0\n for ind in indices:\n cycle = cycleList[ind]\n csize = get_size(cycle, segSeqD)\n total_EC_size+=csize\n if cycleCNs[ind] < args.min_cn_flow and csize < minCycleSize:\n continue\n\n cIndsToMerge = set()\n s_set = set([segSeqD[abs(s_num)] for s_num in cycle])\n s_set -= seenSegs\n if not s_set:\n continue\n\n for c_ind, clust_dict in enumerate(clusters):\n for s in s_set:\n if clust_dict[s[0]][s[1] - padding:s[2] + padding]:\n cIndsToMerge.add(c_ind)\n break\n\n newClusters = []\n newClust = defaultdict(IntervalTree)\n for s in s_set:\n newClust[s[0]].addi(s[1], s[2] + 1, ind)\n\n for c_ind, currClust in enumerate(clusters):\n if c_ind in cIndsToMerge:\n for k, v in currClust.items():\n for ival in v:\n newClust[k].addi(ival.begin, ival.end, ival.data)\n\n else:\n newClusters.append(currClust)\n\n newClusters.append(newClust)\n clusters = newClusters\n seenSegs |= s_set\n\n indexClusters = []\n # extract only the cycle indices from each cluster and return\n for clust in clusters:\n currIndexSet = set()\n for k, v in clust.items():\n for ival in v:\n currIndexSet.add(ival.data)\n\n if get_amount_sigamp(currIndexSet, cycleList, segSeqD) > 10000:\n indexClusters.append(currIndexSet)\n\n # remove those where the max CN is below threshold\n indexClusters = [x for x in indexClusters if check_max_cn(x, cycleList, segSeqD)]\n\n return indexClusters\n\n# 
------------------------------------------------------------\n\n'''\nAmplicon Classes:\n#if not invalid\n\n1) No amp/Invalid\n2) Linear amplification\n3) Complex non-cyclic\n4) BFB\n5) BFB-linked cylic (ecBFB)\n6) Cyclic (ecDNA)\n\nGraph edge classes:\n1) No amp/Invalid\n2) Non-cyclic\n3) Integration\n4) Hybrid - joins amplified cyclic and non cyclic\n5) Cyclic\n\n'''\n\nmixLookups = {\n frozenset([\"No amp/Invalid\", ]): \"No amp/Invalid\",\n frozenset([\"No amp/Invalid\", \"Linear amplification\"]): \"Integration\",\n frozenset([\"No amp/Invalid\", \"Trivial cycle\"]): \"Integration\",\n frozenset([\"No amp/Invalid\", \"Complex non-cyclic\"]): \"Integration\",\n frozenset([\"No amp/Invalid\", \"Complex cyclic\"]): \"Integration\",\n frozenset([\"Linear amplification\"]): \"Non-cyclic\",\n frozenset([\"Linear amplification\", \"Trivial cycle\"]): \"Integration\",\n frozenset([\"Linear amplification\", \"Complex non-cyclic\"]): \"Non-cyclic\",\n frozenset([\"Linear amplification\", \"Complex cyclic\"]): \"Integration\",\n frozenset([\"Trivial cycle\"]): \"Cyclic\",\n frozenset([\"Trivial cycle\", \"Complex non-cyclic\"]): \"Hybrid\",\n frozenset([\"Trivial cycle\", \"Complex cyclic\"]): \"Cyclic\",\n frozenset([\"Complex non-cyclic\"]): \"Non-cyclic\",\n frozenset([\"Complex non-cyclic\", \"Complex cyclic\"]): \"Hybrid\",\n frozenset([\"Complex cyclic\"]): \"Cyclic\",\n}\n\n# (circular,complex)\ncategories = [\"No amp/Invalid\", \"Linear amplification\", \"Trivial cycle\", \"Complex non-cyclic\", \"Complex cyclic\",\n \"foldback_read_prop\", \"BFB_bwp\", \"Distal_bwp\", \"BFB_cwp\", \"Amp_entropy\", \"Amp_decomp_entropy\",\n \"Amp_nseg_entropy\"]\nmixing_cats = [\"No amp/Invalid\", \"Non-cyclic\", \"Integration\", \"Hybrid\", \"Cyclic\"]\n\nampDefs = {(False, False): \"Linear amplification\", (False, True): \"Complex non-cyclic\",\n (True, False): \"Trivial cycle\", (True, True): \"Complex cyclic\"}\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Classify AA amplicon type\")\n parser.add_argument(\"-i\", \"--input\", help=\"Path to list of files to use. Each line formatted as: \"\n \"sample_name cycles.txt graph.txt. 
Give this argument if not using -c and -g.\")\n parser.add_argument(\"-c\", \"--cycles\", help=\"AA-formatted cycles file\")\n parser.add_argument(\"-g\", \"--graph\", help=\"AA-formatted graph file\")\n parser.add_argument(\"--ref\", help=\"Reference genome name used for alignment, one of hg19, GRCh37, or GRCh38.\",\n choices=[\"hg19\", \"GRCh37\", \"hg38\", \"GRCh38\"], required=True)\n\n parser.add_argument(\"--min_cn_flow\", type=float, help=\"Minimum CN flow to consider as amplification (1.0).\",\n default=1)\n parser.add_argument(\"--min_size\", type=float, help=\"Minimum cycle size (in bp) to consider as valid amplicon \"\n \"(5000).\", default=5000)\n parser.add_argument(\"-o\", help=\"Output filename prefix\")\n parser.add_argument(\"--plotstyle\", help=\"Type of visualizations to produce.\",\n choices=[\"grouped\", \"individual\", \"noplot\"], default=\"noplot\")\n parser.add_argument(\"--force\", help=\"Disable No amp/Invalid class if possible\", action='store_true')\n # parser.add_argument(\"--use_BFB_linked_cyclic_class\", help=\"Include the \\'BFB-linked cyclic\\' class\",\n # action='store_true')\n parser.add_argument(\"--add_chr_tag\", help=\"Add \\'chr\\' to the beginning of chromosome names in input files.\",\n action='store_true')\n # parser.add_argument(\"--report_genes\", help=\"Extract list of genes from amplicons with given classification.\",\n # choices=[\"ecdna\", \"bfb\", \"other\", \"all\"], nargs='+', default=[])\n parser.add_argument(\"--report_complexity\", help=\"Compute a measure of amplicon entropy for each amplicon.\",\n action='store_true')\n parser.add_argument(\"--verbose_classification\", help=\"Generate verbose output with raw classification scores.\",\n action='store_true')\n parser.add_argument(\"--annotate_cycles_file\", help=\"Create an annotated cycles file, indicating the classification \"\n \"of the paths and cycles present.\", action='store_true')\n parser.add_argument(\"--no_LC_filter\", help=\"Do not filter low-complexity cycles. Not recommended to set this flag.\",\n action='store_true', default=False)\n parser.add_argument(\"--decomposition_strictness\", help=\"Value between 0 and 1 reflecting how strictly to filter \"\n \"low CN decompositions (default = 0.1). Higher values \"\n \"filter more of the low-weight decompositions.\", type=float,\n default=0.1)\n parser.add_argument(\"-v\", \"--version\", action='version', version='amplicon_classifier {version} \\n Author: Jens \\\n Luebeck (jluebeck [at] ucsd.edu)'.format(version=__version__))\n\n args = parser.parse_args()\n\n if not (args.cycles and args.graph) and not args.input:\n print(\"Need to specify (--cycles & --graph) or --input\\n\")\n sys.exit(1)\n\n if args.ref == \"hg38\": args.ref = \"GRCh38\"\n patch_links = read_patch_regions(args.ref)\n if 0 <= args.decomposition_strictness <= 1:\n decomposition_strictness= args.decomposition_strictness\n else:\n print(\"--decomposition_strictness must be a value between 0 and 1\")\n sys.exit(1)\n\n # check if aa data repo set, construct LC datatabase\n try:\n AA_DATA_REPO = os.environ[\"AA_DATA_REPO\"] + \"/\" + args.ref + \"/\"\n fDict = {}\n with open(AA_DATA_REPO + \"file_list.txt\") as infile:\n for line in infile:\n fields = line.strip().rsplit()\n fDict[fields[0]] = fields[1]\n\n lcPath = AA_DATA_REPO + fDict[\"mapability_exclude_filename\"]\n lcD = defaultdict(IntervalTree)\n if not args.no_LC_filter:\n lcD = buildLCDatabase(lcPath)\n\n except KeyError:\n sys.stderr.write(\"$AA_DATA_REPO not set. 
Please see AA installation instructions.\\n\")\n sys.exit(1)\n\n # gene_lookup = {}\n ftgd_list = [] # store list of feature gene classifications\n # gene_file_location_lookup = {\"hg19\": \"human_hg19_september_2011/Genes_July_2010_hg19.gff\",\n # \"GRCh38\": \"genes_hg38.gff\",\n # \"GRCh37\": \"human_hg19_september_2011/Genes_July_2010_hg19.gff\"}\n #\n # refGeneFileLoc = AA_DATA_REPO + gene_file_location_lookup[args.ref]\n\n # read the gene list\n refGeneFileLoc = AA_DATA_REPO + fDict[\"gene_filename\"]\n gene_lookup = get_genes.parse_genes(refGeneFileLoc)\n\n if not args.input:\n tempName = args.cycles.rsplit(\"/\")[-1].rsplit(\".\")[0]\n flist = [[tempName, args.cycles, args.graph]]\n if not args.o:\n args.o = os.path.basename(args.cycles).rsplit(\"_cycles.txt\")[0]\n\n else:\n flist = readFlist(args.input)\n if not args.o:\n args.o = os.path.basename(args.input).rsplit(\".\")[0]\n\n minCycleSize = args.min_size\n\n AMP_dvaluesList = []\n EDGE_dvaluesList = []\n AMP_classifications = []\n sampNames = []\n cyclesFiles = []\n featEntropyD = {}\n samp_to_ec_count = defaultdict(int)\n for fpair in flist:\n if len(fpair) > 2:\n sName, cyclesFile, graphFile = fpair\n sampNames.append(sName)\n cyclesFiles.append(cyclesFile)\n ampN = cyclesFile.rstrip(\"_cycles.txt\").rsplit(\"_\")[-1]\n print(sName, ampN)\n segSeqD, cycleList, cycleCNs = parseCycle(cyclesFile, graphFile, args.add_chr_tag, lcD, patch_links)\n\n else:\n print(fpair)\n sys.stderr.write(\"File list not properly formatted\\n\")\n sys.exit(1)\n\n cycleTypes = []\n cycleWeights = []\n invalidInds = []\n rearrCycleInds = set()\n graph_cns = get_graph_cns(graphFile, args.add_chr_tag)\n fb_prop, maxCN = compute_f_from_AA_graph(graphFile, args.add_chr_tag)\n rearr_e = tot_rearr_edges(graphFile, args.add_chr_tag)\n totalCompCyclicCont = 0\n totCyclicCont = 0\n for ind, cycle in enumerate(cycleList):\n hasNonCircLen1 = True if len(cycle) == 3 and cycle[0] == 0 else False\n oneCycle = (len(cycleList) == 1)\n isSingleton = hasNonCircLen1 or oneCycle\n if cycleIsNoAmpInvalid(cycle, cycleCNs[ind], segSeqD, isSingleton, maxCN) and not args.force:\n invalidInds.append(ind)\n cycleTypes.append(\"No amp/Invalid\")\n\n else:\n circCyc = isCircular(cycle)\n compCyc = isRearranged(cycle, segSeqD)\n if compCyc:\n rearrCycleInds.add(ind)\n if circCyc:\n totalCompCyclicCont += get_size(cycle, segSeqD)\n\n if circCyc:\n totCyclicCont += get_size(cycle, segSeqD)\n\n cycleTypes.append(ampDefs[(circCyc, compCyc)])\n\n currWt = weightedCycleAmount(cycle, cycleCNs[ind], segSeqD)\n cycleWeights.append(currWt)\n\n totalWeight = max(sum(cycleWeights), 1)\n AMP_dvaluesDict = {x: 0.0 for x in categories}\n for i, wt in zip(cycleTypes, cycleWeights):\n AMP_dvaluesDict[i] += (wt / totalWeight)\n\n # anything stored in AMP_dvaluesDict prior to running classify will get used in classification\n # make sure you're not putting in other properties before here.\n ampClass = classifyAmpliconProfile(AMP_dvaluesDict, rearr_e, totalCompCyclicCont, totCyclicCont)\n\n # decomposition/amplicon complexity\n totalEnt, decompEnt, nEnt = decompositionComplexity(graphFile, cycleList, cycleCNs, segSeqD,\n range(len(cycleList)), set(), args.add_chr_tag)\n AMP_dvaluesDict[\"Amp_entropy\"] = totalEnt\n AMP_dvaluesDict[\"Amp_decomp_entropy\"] = decompEnt\n AMP_dvaluesDict[\"Amp_nseg_entropy\"] = nEnt\n\n # now layer on the bfb classification\n # first compute some properties\n fb_prop, maxCN = compute_f_from_AA_graph(graphFile, args.add_chr_tag)\n\n fb_bwp, nfb_bwp, bfb_cwp, bfbHasEC, 
non_bfb_cycle_inds, bfb_cycle_inds = cycles_file_bfb_props(cycleList,\n segSeqD, cycleCNs, graphFile, args.add_chr_tag)\n # \"foldback_read_prop\", \"BFB_bwp\", \"Distal_bwp\", \"BFB_cwp\"\n AMP_dvaluesDict[\"foldback_read_prop\"] = fb_prop\n AMP_dvaluesDict[\"BFB_bwp\"] = fb_bwp\n AMP_dvaluesDict[\"Distal_bwp\"] = nfb_bwp\n AMP_dvaluesDict[\"BFB_cwp\"] = bfb_cwp\n\n bfbClass = classifyBFB(fb_prop, fb_bwp, nfb_bwp, bfb_cwp, maxCN)\n\n ecStat = False\n bfbStat = False\n if ampClass == \"Cyclic\" and not bfbClass:\n ecStat = True\n bfb_cycle_inds = []\n\n elif bfbClass and ampClass != \"No amp/Invalid\":\n bfbStat = True\n if bfbHasEC:\n ecStat = True\n\n else:\n bfb_cycle_inds = []\n\n # determine number of ecDNA present\n ecIndexClusters = []\n if ecStat:\n excludableCycleIndices = set(bfb_cycle_inds + invalidInds)\n ecIndexClusters = clusterECCycles(cycleList, cycleCNs, segSeqD, excludableCycleIndices)\n ecAmpliconCount = max(len(ecIndexClusters), 1)\n\n else:\n ecAmpliconCount = 0\n\n samp_to_ec_count[sName] += ecAmpliconCount\n # write entropy for each feature\n ecEntropies = []\n if ecAmpliconCount == 1 and not ecIndexClusters:\n ecEntropies.append((totalEnt, decompEnt, nEnt))\n\n for ecCycleList in ecIndexClusters:\n c_ex_I = bfb_cycle_inds if bfbStat else set()\n totalEnt, decompEnt, nEnt = decompositionComplexity(graphFile, cycleList, cycleCNs, segSeqD,\n ecCycleList, c_ex_I, args.add_chr_tag)\n ecEntropies.append((totalEnt, decompEnt, nEnt))\n\n for ind, etup in enumerate(ecEntropies):\n featEntropyD[(sName, ampN, \"ecDNA_\" + str(ind+1))] = etup\n\n if bfbStat:\n bfb_totalEnt, bfb_decompEnt, bfb_nEnt = decompositionComplexity(graphFile, cycleList, cycleCNs, segSeqD,\n bfb_cycle_inds, set(), args.add_chr_tag)\n featEntropyD[(sName, ampN, \"BFB_1\")] = (bfb_totalEnt, bfb_decompEnt, bfb_nEnt)\n\n # get genes\n feat_gene_truncs, feat_gene_cns = get_genes.extract_gene_list(sName, ampN, gene_lookup, cycleList, segSeqD,\n bfb_cycle_inds, ecIndexClusters, invalidInds,\n bfbStat, ecStat, ampClass, graphFile,\n args.add_chr_tag, args.o)\n\n ftgd_list.append([sName, ampN, feat_gene_truncs, feat_gene_cns])\n\n # store this additional information\n AMP_classifications.append((ampClass, ecStat, bfbStat, ecAmpliconCount))\n dvalues = [AMP_dvaluesDict[x] for x in categories]\n AMP_dvaluesList.append(dvalues)\n\n # edge classification\n edgeTypeCountD = defaultdict(float)\n if graphFile:\n posCycleLookup = buildPosCycleLookup(cycleList, segSeqD)\n bps = parseBPG(graphFile, args.add_chr_tag, lcD)\n for bp in bps:\n lCycles, rCycles = bpgEdgeToCycles(bp, posCycleLookup)\n # indices of left and right cycles on the discordant edges, and the index-ordered list of types\n resD = classifyConnections(lCycles, rCycles, cycleTypes)\n for k, v in resD.items():\n edgeTypeCountD[mixLookups[k]] += v\n\n # norm the values\n eTCDSum = float(sum(edgeTypeCountD.values()))\n for k, v in edgeTypeCountD.items():\n edgeTypeCountD[k] = v / eTCDSum\n\n dvalues = [edgeTypeCountD[x] for x in mixing_cats]\n EDGE_dvaluesList.append(dvalues)\n\n #write the annotated cycles file\n if args.annotate_cycles_file:\n outname = os.path.basename(cyclesFile).rsplit(\"_cycles\")[0] + \"_annotated_cycles.txt\"\n write_annotated_corrected_cycles_file(args.o, outname, cycleList, cycleCNs, segSeqD, bfb_cycle_inds,\n ecIndexClusters, invalidInds, rearrCycleInds)\n\n # PLOTTING\n textCategories = [\"No amp/Invalid\", \"Linear\\namplification\", \"Trivial\\ncycle\", \"Complex\\nnon-cyclic\",\n \"Complex\\ncyclic\", \"BFB\\nfoldback\"]\n if 
args.plotstyle == \"grouped\":\n from radar_plotting import *\n\n print(\"plotting\")\n make_classification_radar(textCategories, AMP_dvaluesList, args.o + \"_amp_class\", sampNames)\n make_classification_radar(mixing_cats, EDGE_dvaluesList, args.o + \"_edge_class\", sampNames)\n\n elif args.plotstyle == \"individual\":\n from radar_plotting import *\n\n print(\"plotting\")\n for a, e, s in zip(AMP_dvaluesList, EDGE_dvaluesList, sampNames):\n print(textCategories, a)\n make_classification_radar(textCategories, [a[:len(textCategories)], ], args.o + \"_\" + s + \"_amp_class\",\n sampNames)\n make_classification_radar(mixing_cats, [e, ], args.o + \"_\" + s + \"_edge_class\", sampNames)\n\n #OUTPUT FILE WRITING\n print(\"writing output files\")\n write_outputs(args, ftgd_list, featEntropyD, categories, sampNames, cyclesFiles, AMP_classifications,\n AMP_dvaluesList, mixing_cats, EDGE_dvaluesList, samp_to_ec_count)\n\n print(\"done\")\n", "sub_path": "amplicon_classifier.py", "file_name": "amplicon_classifier.py", "file_ext": "py", "file_size_in_byte": 32340, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "math.log", "line_number": 176, "usage_type": "call"}, {"api_name": "math.log", "line_number": 187, "usage_type": "call"}, {"api_name": "math.log", "line_number": 195, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 274, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 409, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 578, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 619, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 627, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 644, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 644, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 645, "usage_type": "call"}, {"api_name": "get_genes.parse_genes", "line_number": 657, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 690, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 690, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 691, "usage_type": "call"}, {"api_name": "get_genes.extract_gene_list", "line_number": 801, "usage_type": "call"}]} {"seq_id": "603374310", "text": "from multiprocessing import Pool #parallel processing\nimport multiprocessing as mp\nimport structure\nfrom structure.global_constants import *\nfrom structure.cell import Tissue, BasicSpringForceNoGrowth\nimport structure.initialisation as init\nimport sys\nimport os\nimport numpy as np\nimport libs.pd_lib_neutral as lib\nimport libs.data as data\n\n\ndef calc_interactions(tissue,mutant_index,n):\n \"\"\"treats all cells with ancestor 'mutant_index' as cooperators\n returns:\n n (int): size of clone\n I_CC/I_CD (ints): number of cooperator-cooperator/defector interactions in population\n W_CC/W_CD (floats): number of cooperator-cooperator/defector interactions in pop. 
weighted by neighbour number \n \"\"\"\n neighbours = tissue.mesh.neighbours\n types = tissue.properties['ancestor']==mutant_index\n I_CC,I_CD,W_CC,W_CD,N_D = 0,0,0.,0.,0\n for ctype,cell_neighbours in zip(types,neighbours):\n if ctype:\n Cneigh,neigh = float(sum(types[cell_neighbours])),float(len(cell_neighbours))\n I_CC += Cneigh\n I_CD += neigh - Cneigh\n W_CC += Cneigh/neigh\n W_CD += (neigh-Cneigh)/neigh\n return [n,I_CC,I_CD,W_CC,W_CD]\n\ndef run_sim(i):\n \"\"\"run a single simulation and save interaction data for each clone\"\"\"\n rand = np.random.RandomState()\n dt=0.005*-50./MU\n tissue = lib.initialise_tissue_ancestors(l,dt,10.,10.,rand,MU)\n tissue.properties['ancestor']=np.arange(l*l)\n if init_timend is not None: tissue = lib.run(tissue,lib.simulation_ancestor_tracking(tissue,dt,init_timend/dt,init_timend/dt,rand),init_timend/dt,init_timend/dt)[-1]\n data = [calc_interactions(tissue,mutant_index,n)\n for tissue in lib.run_generator(lib.simulation_ancestor_tracking(tissue,dt,timend/dt,timestep/dt,rand,til_fix=True),timend/dt,timestep/dt)\n for mutant_index,n in enumerate(np.bincount(tissue.properties['ancestor'])) if n>=n_min] \n np.savetxt('%s/data_%d'%(outdir,i),data,fmt=('%4d','%4d','%4d','%4.6f','%4.6f'))\n return None\n\nl = 10 # population size N = l*l\ninit_timend = 10. # initial simulation time to equilibrate \ntimestep = 12. # timesteps at which to calc interaction data (hours)\ntimend = 10000. # length of simulation (hours)\nsim_runs = int(sys.argv[1]) # number of sims to run taken as command line arg\nMU = float(sys.argv[2]) #spring constant\nn_min = 1 \noutdir = 'interaction_data/supp_vary_MU/MU%d/raw_data'%MU\nif not os.path.exists(outdir): # if the outdir doesn't exist create it \n os.makedirs(outdir)\n\n\n# run simulations in parallel \ncpunum=mp.cpu_count()\npool = Pool(processes=cpunum-1,maxtasksperchild=1000)\npool.map(run_sim,range(sim_runs))\npool.close()\npool.join()\n", "sub_path": "VTdyn/cluster_stats_vary_MU.py", "file_name": "cluster_stats_vary_MU.py", "file_ext": "py", "file_size_in_byte": 2726, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "numpy.random.RandomState", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 35, "usage_type": "attribute"}, {"api_name": "libs.pd_lib_neutral.initialise_tissue_ancestors", "line_number": 37, "usage_type": "call"}, {"api_name": "libs.pd_lib_neutral", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 38, "usage_type": "call"}, {"api_name": "libs.pd_lib_neutral.run", "line_number": 39, "usage_type": "call"}, {"api_name": "libs.pd_lib_neutral", "line_number": 39, "usage_type": "name"}, {"api_name": "libs.pd_lib_neutral.simulation_ancestor_tracking", "line_number": 39, "usage_type": "call"}, {"api_name": "libs.data", "line_number": 40, "usage_type": "name"}, {"api_name": "libs.pd_lib_neutral.run_generator", "line_number": 41, "usage_type": "call"}, {"api_name": "libs.pd_lib_neutral", "line_number": 41, "usage_type": "name"}, {"api_name": "libs.pd_lib_neutral.simulation_ancestor_tracking", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 43, "usage_type": "call"}, {"api_name": "libs.data", "line_number": 43, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 51, "usage_type": 
"attribute"}, {"api_name": "os.path.exists", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 55, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 59, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 60, "usage_type": "call"}]} {"seq_id": "68990903", "text": "# MIT License\n#\n# Copyright (c) 2020-2022 Quoc Tran\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport datetime as dt\nfrom sklearn import linear_model\nimport streamlit as st\nimport pwlf_mod as pwlf\nfrom csv import writer\n\n#DEATH_RATE = 0.01\n#ICU_RATE = 0.05\n#HOSPITAL_RATE = 0.15\n#SYMPTOM_RATE = 0.2\n#INFECT_2_HOSPITAL_TIME = 13\n#HOSPITAL_2_ICU_TIME = 2\n#ICU_2_DEATH_TIME = 5\n#ICU_2_RECOVER_TIME = 11\n#NOT_ICU_DISCHARGE_TIME = 7\n\n\ndef get_data(file_template='../csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_{type}_{scope}.csv',\n type='deaths', scope='global'):\n \"\"\"\n type = enum('deaths', 'confirmed', 'recovered'),\n scope = enum('global', 'US')\n \"\"\"\n death_data = pd.read_csv(file_template.format(type=type, scope=scope))\n return death_data.rename(index=str, columns={\"Country/Region\": \"Country\",\n \"Province/State\": \"State\",\n \"Country_Region\": \"Country\",\n \"Province_State\": \"State\",\n \"Admin2\": \"County\"})\n\n\ndef get_data_by_country(country, type='deaths'):\n global_data = get_data(scope='global', type=type)\n local_data = global_data.query('Country == \"{}\"'.format(country)).iloc[:,4:].T.sum(axis=1).to_frame()\n local_data.index = pd.to_datetime(local_data.index)\n return local_data[local_data>0].dropna()\n\n\ndef get_data_by_state(state, type='deaths'):\n US_data = get_data(scope='US', type=type)\n local_data = US_data.query('State == \"{}\"'.format(state)).iloc[:,12:].T.sum(axis=1).to_frame()\n local_data.index = pd.to_datetime(local_data.index)\n return local_data[local_data>0].dropna()\n\n\ndef get_data_by_county_and_state(county, state, type='deaths'):\n US_data = get_data(scope='US', type=type)\n local_data = US_data.query('County == \"{}\" and State == \"{}\"'.format(county, state)).iloc[:,12:].T.sum(axis=1).to_frame()\n local_data.index = pd.to_datetime(local_data.index)\n return local_data[local_data>0].dropna()\n\n\ndef get_lockdown_date_global(csv_file='data/lockdown_date_country.csv'):\n return 
pd.read_csv(csv_file)[['country', 'lockdown_date']].set_index('country')\n\n\ndef get_lockdown_date_by_country(country):\n try:\n lockdown_date = pd.to_datetime(get_lockdown_date_global().loc[country][0])\n except KeyError:\n #lockdown_date = dt.date.today()\n lockdown_date = None\n return lockdown_date\n\n\ndef get_lockdown_date_US(csv_file='data/lockdown_date_state_US.csv'):\n return pd.read_csv(csv_file)[['state', 'lockdown_date']].set_index('state')\n\n\ndef get_lockdown_date_by_state_US(state):\n try:\n lockdown_date = pd.to_datetime(get_lockdown_date_US().loc[state][0])\n except KeyError:\n #lockdown_date = dt.date.today()\n lockdown_date = None\n return lockdown_date\n\n\ndef get_daily_data(cum_data):\n return cum_data.diff().fillna(0)\n\n\ndef get_impute_from_death(death_row, periods, end_date_offset=0):\n date_ind = death_row.name\n end_date = date_ind + dt.timedelta(end_date_offset)\n date_range = pd.date_range(end=end_date, periods=periods)\n return pd.DataFrame(death_row.tolist()*periods, index=date_range)\n\n\ndef get_hospital_beds_from_death(death_row):\n '''Get imputation of hospital beds needed from one day record of new death'''\n dead_hospital_use_periods = HOSPITAL_2_ICU_TIME+ICU_2_DEATH_TIME\n dead_hospital_use = get_impute_from_death(death_row=death_row, \n periods=dead_hospital_use_periods)\n ICU_recovered_hospital_use_periods = HOSPITAL_2_ICU_TIME+ICU_2_RECOVER_TIME+NOT_ICU_DISCHARGE_TIME\n ICU_recovered_hospital_use_end_date_offset = ICU_2_RECOVER_TIME-ICU_2_DEATH_TIME+NOT_ICU_DISCHARGE_TIME\n ICU_recovered_hospital_use = get_impute_from_death(death_row=death_row, \n periods=ICU_recovered_hospital_use_periods,\n end_date_offset=ICU_recovered_hospital_use_end_date_offset)\n no_ICU_hospital_use_periods = NOT_ICU_DISCHARGE_TIME\n no_ICU_hospital_use_end_date_offset = -HOSPITAL_2_ICU_TIME-ICU_2_DEATH_TIME+NOT_ICU_DISCHARGE_TIME\n no_ICU_hospital_use = get_impute_from_death(death_row=death_row, \n periods=no_ICU_hospital_use_periods,\n end_date_offset=no_ICU_hospital_use_end_date_offset)\n hospital_beds = dead_hospital_use.add(((ICU_RATE-DEATH_RATE)/DEATH_RATE)*ICU_recovered_hospital_use, fill_value=0)\\\n .add(((HOSPITAL_RATE-ICU_RATE)/DEATH_RATE)*no_ICU_hospital_use, fill_value=0)\n hospital_beds.columns = ['hospital_beds']\n return hospital_beds\n\n\ndef get_ICU_from_death(death_row):\n '''Get imputation of ICU needed from one day record of new death'''\n dead_ICU_use = get_impute_from_death(death_row=death_row, periods=ICU_2_DEATH_TIME)\n recovered_ICU_use_end_date_offset = ICU_2_RECOVER_TIME-ICU_2_DEATH_TIME\n recovered_ICU_use = get_impute_from_death(death_row=death_row, \n periods=ICU_2_RECOVER_TIME,\n end_date_offset=recovered_ICU_use_end_date_offset)\n ICU_n = dead_ICU_use.add(((ICU_RATE-DEATH_RATE)/DEATH_RATE)*recovered_ICU_use, fill_value=0)\n ICU_n.columns = ['ICU']\n return ICU_n\n\n\ndef get_infected_cases(local_death_data):\n '''This number only is close to number of confirmed case in country very early in the disease and \n can still do contact tracing or very wide testing, eg. 
South Korea, Germany'''\n delay_time = INFECT_2_HOSPITAL_TIME + HOSPITAL_2_ICU_TIME + ICU_2_DEATH_TIME\n infected_cases = (100/DEATH_RATE)*local_death_data.tshift(-delay_time)\n infected_cases.columns = ['infected']\n return infected_cases\n\n\ndef get_symptomatic_cases(local_death_data):\n '''This is number of cases that show clear symptoms (severe),\n in country without investigative testing this is close to number of confirmed case, most country'''\n delay_time = HOSPITAL_2_ICU_TIME + ICU_2_DEATH_TIME\n symptomatic_cases = (SYMPTOM_RATE/DEATH_RATE)*local_death_data.tshift(-delay_time)\n symptomatic_cases.columns = ['symptomatic']\n return symptomatic_cases\n\n\ndef get_hospitalized_cases(local_death_data):\n '''In country with severe lack of testing, this is close to number of confirmed case, eg. Italy, Iran'''\n delay_time = HOSPITAL_2_ICU_TIME + ICU_2_DEATH_TIME\n hospitalized_cases = (HOSPITAL_RATE/DEATH_RATE)*local_death_data.tshift(-delay_time)\n hospitalized_cases.columns = ['hospitalized']\n return hospitalized_cases\n\n\ndef get_number_hospital_beds_need(daily_local_death_new):\n '''Calculate number of hospital bed needed from number of daily new death '''\n # Start by first date\n hospital_beds = get_hospital_beds_from_death(daily_local_death_new.iloc[0])\n # Run through all days\n for i in range(len(daily_local_death_new)-1):\n hospital_beds = hospital_beds.add(get_hospital_beds_from_death(daily_local_death_new.iloc[i+1]), \n fill_value=0)\n hospital_beds = hospital_beds.iloc[:-(HOSPITAL_2_ICU_TIME+ICU_2_RECOVER_TIME+NOT_ICU_DISCHARGE_TIME)]\n return hospital_beds\n\n\ndef get_number_ICU_need(daily_local_death_new):\n '''Calculate number of ICU needed from number of daily new death '''\n # Start by first date\n ICU_n = get_ICU_from_death(daily_local_death_new.iloc[0])\n # Run through all days\n for i in range(len(daily_local_death_new)-1):\n ICU_n = ICU_n.add(get_ICU_from_death(daily_local_death_new.iloc[i+1]), fill_value=0)\n ICU_n = ICU_n.iloc[:-ICU_2_RECOVER_TIME]\n return ICU_n\n\n\ndef get_log_daily_predicted_death(local_death_data, forecast_horizon=60, lockdown_date=None,\n relax_date=None, contain_rate=0.5, test_rate=0.2):\n '''Since this is highly contagious disease. Daily new death, which is a proxy for daily new infected cases\n is model as d(t)=a*d(t-1) or equivalent to d(t) = b*a^(t). After a log transform, it becomes linear.\n log(d(t))=logb+t*loga, so we can use linear regression to provide forecast (use robust linear regressor to avoid\n data anomaly in death reporting)\n There are two seperate linear curves, one before the lockdown is effective(21 days after lockdown) and one after\n For using this prediction to infer back the other metrics (infected cases, hospital, ICU, etc..) only the before\n curve is used and valid. If we assume there is no new infection after lock down (perfect lockdown), the after\n curve only depends on the distribution of time to death since ICU.\n WARNING: if lockdown_date is not provided, we will default to no lockdown to raise awareness of worst case\n if no action. 
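    (For example, a fitted slope log(a) = 0.1 in log d(t) = log(b) + t*log(a) means daily deaths grow by roughly 10% per day, doubling about every 7 days; with no lockdown date that single exponential is simply extrapolated over the whole forecast horizon.)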
If you have info on lockdown date please use it to make sure the model provide accurate result'''\n daily_local_death_new = get_daily_data(local_death_data)\n daily_local_death_new = daily_local_death_new.rolling(3, min_periods=1).mean()\n #shift ahead 1 day to avoid overfitted due to average of exponential value\n #daily_local_death_new = daily_local_death_new.shift(1)\n #import pdb; pdb.set_trace()\n daily_local_death_new.columns = ['death']\n log_daily_death = np.log(daily_local_death_new)\n # log_daily_death.dropna(inplace=True)\n data_start_date = min(local_death_data.index)\n data_end_date = max(local_death_data.index)\n forecast_end_date = data_end_date + dt.timedelta(forecast_horizon)\n forecast_date_index = pd.date_range(start=data_start_date, end=forecast_end_date)\n if lockdown_date is not None:\n lockdown_date = pd.to_datetime(lockdown_date)\n else:\n lockdown_date = forecast_end_date\n lockdown_effective_date = lockdown_date + dt.timedelta(\n INFECT_2_HOSPITAL_TIME + HOSPITAL_2_ICU_TIME + ICU_2_DEATH_TIME)\n data_start_date_idx = (data_start_date - lockdown_effective_date).days\n data_end_date_idx = (data_end_date - lockdown_effective_date).days\n forecast_end_date_idx = data_end_date_idx + forecast_horizon\n forecast_time_idx = (forecast_date_index - lockdown_effective_date).days.values\n data_time_idx = (log_daily_death.index - lockdown_effective_date).days.values\n log_daily_death['time_idx'] = data_time_idx\n log_daily_death = log_daily_death.replace([np.inf, -np.inf], np.nan).dropna()\n log_daily_death_before = log_daily_death[log_daily_death.time_idx < 0]\n regr_before = linear_model.HuberRegressor(fit_intercept=True)\n regr_before.fit(log_daily_death_before.time_idx.values.reshape(-1, 1), log_daily_death_before.death)\n outliers_before = regr_before.outliers_\n log_predicted_death_before_values = regr_before.predict(forecast_time_idx[forecast_time_idx < 0].reshape(-1, 1))\n log_predicted_death_before_index = forecast_date_index[forecast_time_idx < 0]\n log_predicted_death_before = pd.DataFrame(log_predicted_death_before_values,\n index=log_predicted_death_before_index)\n if all(forecast_time_idx < 0):\n print(\"Lockdown is not effective in forecast range. 
Second model not needed\")\n outliers = outliers_before\n regr_pw = pwlf.PiecewiseLinFit(x=log_daily_death[~outliers].time_idx.values, y=log_daily_death[~outliers].death)\n break_points = np.array([data_start_date_idx, data_end_date_idx])\n regr_pw.fit_with_breaks(break_points)\n variance = regr_pw.variance()\n log_predicted_death_pred_var_oos = variance * (forecast_time_idx[forecast_time_idx > data_end_date_idx] -\n data_end_date_idx)\n elif all(data_time_idx <= 3):\n print(\"Use default second model due to not enough data\")\n\n if (len(log_daily_death) - len(outliers_before))>0:\n outliers_after = np.array([False] * (len(log_daily_death) - len(outliers_before)))\n outliers = np.concatenate((outliers_before, outliers_after))\n else:\n outliers = outliers_before\n regr_pw = pwlf.PiecewiseLinFit(x=log_daily_death[~outliers].time_idx.values, y=log_daily_death[~outliers].death)\n break_points = np.array([data_start_date_idx, 0, forecast_end_date_idx])\n regr_pw.fit_with_breaks(break_points)\n # Replace second slope by default value, learning from local with same temperature, transportation\n regr_pw.beta[2]= -0.2\n variance = regr_pw.variance()\n log_predicted_death_pred_var_oos = variance*(forecast_time_idx[forecast_time_idx>data_end_date_idx]-\n data_end_date_idx)\n print(regr_pw.variance())\n print(len(forecast_time_idx[forecast_time_idx>data_end_date_idx]))\n else:\n regr_after = linear_model.HuberRegressor(fit_intercept=True)\n log_daily_death_after = log_daily_death[log_daily_death.time_idx >= 0]\n regr_after.fit(log_daily_death_after.time_idx.values.reshape(-1, 1),\n log_daily_death_after.death)\n outliers_after = regr_after.outliers_\n outliers = np.concatenate((outliers_before, outliers_after))\n regr_pw = pwlf.PiecewiseLinFit(x=log_daily_death[~outliers].time_idx.values, y=log_daily_death[~outliers].death)\n break_points = np.array([data_start_date_idx, 0, data_end_date_idx])\n regr_pw.fit_with_breaks(break_points)\n log_predicted_death_pred_var = regr_pw.prediction_variance(forecast_time_idx)\n log_predicted_death_pred_var_oos = log_predicted_death_pred_var[sum(forecast_time_idx <= data_end_date_idx):]\n #variance = regr_pw.variance()\n #log_predicted_death_pred_var_oos = variance*(forecast_time_idx[forecast_time_idx>data_end_date_idx]-\n # data_end_date_idx)\n\n model_beta = regr_pw.beta\n\n if relax_date is not None:\n relax_date = pd.to_datetime(relax_date)\n relax_effective_date = relax_date + dt.timedelta(\n INFECT_2_HOSPITAL_TIME + HOSPITAL_2_ICU_TIME + ICU_2_DEATH_TIME)\n relax_effective_date_idx = (relax_effective_date - lockdown_effective_date).days\n break_points = np.array([data_start_date_idx, 0, relax_effective_date_idx, forecast_end_date_idx])\n model_beta = np.append(model_beta, ((model_beta[1] + model_beta[2]) * contain_rate +\n model_beta[1] * (1 - contain_rate)) - (model_beta[1] + model_beta[2]))\n log_predicted_death_pred_var_oos = log_predicted_death_pred_var_oos*((0.2/(test_rate+0.01))**3)\n log_predicted_death_values = regr_pw.predict(forecast_time_idx, beta=model_beta, breaks=break_points)\n log_predicted_death_pred_var = regr_pw.prediction_variance(forecast_time_idx)\n\n log_predicted_death_pred_var = np.concatenate(\n (log_predicted_death_pred_var[:sum(forecast_time_idx <= data_end_date_idx)],\n log_predicted_death_pred_var_oos))\n\n log_predicted_death_lower_bound_values = log_predicted_death_values - 1.96 * np.sqrt(log_predicted_death_pred_var)\n log_predicted_death_upper_bound_values = log_predicted_death_values + 1.96 * 
np.sqrt(log_predicted_death_pred_var)\n\n log_predicted_death = pd.DataFrame(log_predicted_death_values, index=forecast_date_index)\n log_predicted_death_lower_bound = pd.DataFrame(log_predicted_death_lower_bound_values, index=forecast_date_index)\n log_predicted_death_upper_bound = pd.DataFrame(log_predicted_death_upper_bound_values, index=forecast_date_index)\n log_predicted_death.columns = ['predicted_death']\n log_predicted_death_lower_bound.columns = ['lower_bound']\n log_predicted_death_upper_bound.columns = ['upper_bound']\n return log_predicted_death, log_predicted_death_lower_bound, log_predicted_death_upper_bound, regr_pw.beta\n\n\ndef get_daily_predicted_death(local_death_data, forecast_horizon=60, lockdown_date=None,\n relax_date=None, contain_rate=0.5, test_rate=0.2):\n log_daily_predicted_death, lb, ub, model_beta = get_log_daily_predicted_death(local_death_data,\n forecast_horizon,\n lockdown_date,\n relax_date,\n contain_rate,\n test_rate)\n return np.exp(log_daily_predicted_death), np.exp(lb), np.exp(ub), model_beta\n\n\n\ndef get_cumulative_predicted_death(local_death_data, forecast_horizon=60, lockdown_date=None,\n relax_date=None, contain_rate=0.5, test_rate=0.2):\n daily, lb, ub, model_beta = get_daily_predicted_death(local_death_data, forecast_horizon, lockdown_date,\n relax_date, contain_rate, test_rate)\n return daily.cumsum(), lb.cumsum(), ub.cumsum(), model_beta\n\n\ndef get_daily_metrics_from_death_data(local_death_data, forecast_horizon=60, lockdown_date=None,\n relax_date=None, contain_rate=0.5, test_rate=0.2):\n daily_predicted_death, daily_predicted_death_lb, daily_predicted_death_ub, model_beta = \\\n get_daily_predicted_death(local_death_data, forecast_horizon, lockdown_date,\n relax_date, contain_rate, test_rate)\n daily_local_death_new = local_death_data.diff().fillna(0)\n daily_local_death_new.columns = ['death']\n daily_infected_cases_new = get_infected_cases(daily_predicted_death)\n daily_symptomatic_cases_new = get_symptomatic_cases(daily_predicted_death)\n daily_hospitalized_cases_new = get_hospitalized_cases(daily_predicted_death)\n daily_hospital_beds_need = get_number_hospital_beds_need(daily_predicted_death)\n daily_ICU_need = get_number_ICU_need(daily_predicted_death)\n return pd.concat([daily_local_death_new,\n daily_predicted_death,\n daily_predicted_death_lb,\n daily_predicted_death_ub,\n daily_infected_cases_new,\n daily_symptomatic_cases_new,\n daily_hospitalized_cases_new,\n daily_hospital_beds_need,\n daily_ICU_need], axis=1, sort=True), model_beta\n\n\ndef get_cumulative_metrics_from_death_data(local_death_data, forecast_horizon=60, lockdown_date=None,\n relax_date=None, contain_rate=0.5, test_rate=0.2):\n daily_metrics, model_beta = get_daily_metrics_from_death_data(local_death_data, forecast_horizon, lockdown_date,\n relax_date, contain_rate, test_rate)\n cumulative_metrics = daily_metrics.drop(columns=['ICU', 'hospital_beds']).cumsum()\n # data_end_date = max(local_death_data.index)\n # cumulative_metrics['lower_bound'] = daily_metrics['lower_bound']\n # cumulative_metrics['lower_bound'].loc[local_death_data.index] = np.nan\n # cumulative_metrics['lower_bound'].loc[data_end_date] = local_death_data.loc[data_end_date][0]\n # cumulative_metrics['lower_bound'] = cumulative_metrics['lower_bound'].cumsum()\n # cumulative_metrics['upper_bound'] = daily_metrics['upper_bound']\n # cumulative_metrics['upper_bound'].loc[local_death_data.index] = np.nan\n # cumulative_metrics['upper_bound'].loc[data_end_date] = 
local_death_data.loc[data_end_date][0]\n # cumulative_metrics['upper_bound'] = cumulative_metrics['upper_bound'].cumsum()\n cumulative_metrics['ICU'] = daily_metrics['ICU']\n cumulative_metrics['hospital_beds'] = daily_metrics['hospital_beds']\n\n return cumulative_metrics, model_beta\n\n\ndef get_metrics_by_country(country, forecast_horizon=60, lockdown_date=None,\n back_test=False, last_data_date=dt.date.today()):\n local_death_data = get_data_by_country(country, type='deaths')\n local_death_data_original = local_death_data.copy()\n daily_local_death_data_original = get_daily_data(local_death_data_original)\n if back_test:\n local_death_data = local_death_data[local_death_data.index.date <= last_data_date]\n local_confirmed_data = get_data_by_country(country, type='confirmed')\n daily_local_confirmed_data = get_daily_data(local_confirmed_data)\n daily_metrics, model_beta = get_daily_metrics_from_death_data(local_death_data, forecast_horizon, lockdown_date)\n daily_metrics['confirmed'] = daily_local_confirmed_data\n if back_test:\n daily_metrics['death']= daily_local_death_data_original\n cumulative_metrics = daily_metrics.drop(columns=['ICU', 'hospital_beds']).cumsum()\n cumulative_metrics['ICU'] = daily_metrics['ICU']\n cumulative_metrics['hospital_beds'] = daily_metrics['hospital_beds']\n #cumulative_metrics, model_beta = get_cumulative_metrics_from_death_data(local_death_data, forecast_horizon, lockdown_date)\n return daily_metrics, cumulative_metrics, model_beta\n\n\ndef get_metrics_by_state_US(state, forecast_horizon=60, lockdown_date=None,\n relax_date=None, contain_rate=0.5, test_rate=0.2,\n back_test=False, last_data_date=dt.date.today()):\n local_death_data = get_data_by_state(state, type='deaths')\n local_death_data_original = local_death_data.copy()\n daily_local_death_data_original = get_daily_data(local_death_data_original)\n if back_test:\n local_death_data = local_death_data[local_death_data.index.date <= last_data_date]\n local_confirmed_data = get_data_by_state(state, type='confirmed')\n daily_local_confirmed_data = get_daily_data(local_confirmed_data)\n daily_metrics, model_beta = get_daily_metrics_from_death_data(local_death_data, forecast_horizon, lockdown_date,\n relax_date, contain_rate, test_rate)\n daily_metrics['confirmed'] = daily_local_confirmed_data\n if back_test:\n daily_metrics['death']= daily_local_death_data_original\n cumulative_metrics = daily_metrics.drop(columns=['ICU', 'hospital_beds']).cumsum()\n cumulative_metrics['ICU'] = daily_metrics['ICU']\n cumulative_metrics['hospital_beds'] = daily_metrics['hospital_beds']\n return daily_metrics, cumulative_metrics, model_beta\n\n\ndef get_metrics_by_county_and_state_US(county, state, forecast_horizon=60, lockdown_date=None):\n local_death_data = get_data_by_county_and_state(county, state, type='deaths')\n local_confirmed_data = get_data_by_county_and_state(county, state, type='confirmed')\n daily_local_confirmed_data = get_daily_data(local_confirmed_data)\n daily_metrics, model_beta = get_daily_metrics_from_death_data(local_death_data, forecast_horizon, lockdown_date)\n daily_metrics['confirmed'] = daily_local_confirmed_data\n cumulative_metrics = daily_metrics.drop(columns=['ICU', 'hospital_beds']).cumsum()\n cumulative_metrics['ICU'] = daily_metrics['ICU']\n cumulative_metrics['hospital_beds'] = daily_metrics['hospital_beds']\n return daily_metrics, cumulative_metrics, model_beta\n\n\ndef get_log_daily_predicted_death_by_country(country, forecast_horizon=60, lockdown_date=None,\n back_test=False, 
last_data_date=dt.date.today()):\n local_death_data = get_data_by_country(country, type='deaths')\n local_death_data.columns = ['death']\n local_death_data_original = local_death_data.copy()\n daily_local_death_data_original = get_daily_data(local_death_data_original)\n log_daily_death_original = np.log(daily_local_death_data_original)\n if back_test:\n local_death_data = local_death_data[local_death_data.index.date <= last_data_date]\n log_predicted_death, log_predicted_death_lb, log_predicted_death_ub, model_beta = \\\n get_log_daily_predicted_death(local_death_data, forecast_horizon, lockdown_date)\n return pd.concat([log_daily_death_original, log_predicted_death, log_predicted_death_lb,\n log_predicted_death_ub], axis=1).replace([np.inf, -np.inf], np.nan), model_beta\n\n\ndef get_log_daily_predicted_death_by_state_US(state, forecast_horizon=60, lockdown_date=None,\n relax_date=None, contain_rate=0.5, test_rate=0.2,\n back_test=False, last_data_date=dt.date.today()):\n local_death_data = get_data_by_state(state, type='deaths')\n local_death_data.columns = ['death']\n local_death_data_original = local_death_data.copy()\n daily_local_death_data_original = get_daily_data(local_death_data_original)\n log_daily_death_original = np.log(daily_local_death_data_original)\n if back_test:\n local_death_data = local_death_data[local_death_data.index.date <= last_data_date]\n log_predicted_death, log_predicted_death_lb, log_predicted_death_ub, model_beta = \\\n get_log_daily_predicted_death(local_death_data, forecast_horizon, lockdown_date,\n relax_date, contain_rate, test_rate)\n return pd.concat([log_daily_death_original, log_predicted_death, log_predicted_death_lb,\n log_predicted_death_ub], axis=1).replace([np.inf, -np.inf], np.nan), model_beta\n\n\ndef get_log_daily_predicted_death_by_county_and_state_US(county, state, forecast_horizon=60, lockdown_date=None):\n local_death_data = get_data_by_county_and_state(county, state, type='deaths')\n daily_local_death_new = local_death_data.diff().fillna(0)\n daily_local_death_new.columns = ['death']\n log_daily_death = np.log(daily_local_death_new)\n log_predicted_death, log_predicted_death_lb, log_predicted_death_ub, model_beta = \\\n get_log_daily_predicted_death(local_death_data, forecast_horizon, lockdown_date)\n return pd.concat([log_daily_death, log_predicted_death, log_predicted_death_lb,\n log_predicted_death_ub], axis=1).replace([np.inf, -np.inf], np.nan), model_beta\n\n\ndef append_row_2_logs(row, log_file='logs/model_params_logs.csv'):\n # Open file in append mode\n with open(log_file, 'a+', newline='') as write_obj:\n # Create a writer object from csv module\n csv_writer = writer(write_obj)\n # Add contents of list as last row in the csv file\n csv_writer.writerow(row)\n\n\ndef get_table_download_link(df, filename=\"data.csv\"):\n \"\"\"Generates a link allowing the data in a given panda dataframe to be downloaded\n in: dataframe\n out: href string\n \"\"\"\n import base64\n csv = df.to_csv(index=True)\n b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here\n href = f'Download csv file'\n return href\n\n", "sub_path": "prognosis/model_utils.py", "file_name": "model_utils.py", "file_ext": "py", "file_size_in_byte": 28080, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pandas.read_csv", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 62, "usage_type": "call"}, {"api_name": 
"pandas.to_datetime", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 76, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 81, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 86, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 94, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 99, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 112, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 217, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 221, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 222, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 224, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 235, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 235, "usage_type": "attribute"}, {"api_name": "sklearn.linear_model.HuberRegressor", "line_number": 237, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 237, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 242, "usage_type": "call"}, {"api_name": "pwlf_mod.PiecewiseLinFit", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 258, "usage_type": "call"}, {"api_name": "pwlf_mod.PiecewiseLinFit", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 262, "usage_type": "call"}, {"api_name": "sklearn.linear_model.HuberRegressor", "line_number": 272, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 272, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 277, "usage_type": "call"}, {"api_name": "pwlf_mod.PiecewiseLinFit", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 279, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 290, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 306, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 308, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 309, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 325, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 348, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 380, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 380, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 401, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 401, "usage_type": "attribute"}, {"api_name": "datetime.date.today", 
"line_number": 433, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 433, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 438, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 443, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 444, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 444, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 449, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 449, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 454, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 460, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 461, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 461, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 468, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 471, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 472, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 472, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 479, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 491, "usage_type": "call"}, {"api_name": "csv.encode", "line_number": 491, "usage_type": "call"}]} {"seq_id": "444975053", "text": "# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass PreValidateEnableBackupRequest(Model):\n \"\"\"Contract to validate if backup can be enabled on the given resource in a\n given vault and given configuration.\n It will validate followings\n 1. Vault capacity\n 2. VM is already protected\n 3. Any VM related configuration passed in properties.\n\n :param resource_type: ProtectedItem Type- VM, SqlDataBase, AzureFileShare\n etc. 
Possible values include: 'Invalid', 'VM', 'FileFolder', 'AzureSqlDb',\n 'SQLDB', 'Exchange', 'Sharepoint', 'VMwareVM', 'SystemState', 'Client',\n 'GenericDataSource', 'SQLDataBase', 'AzureFileShare', 'SAPHanaDatabase',\n 'SAPAseDatabase'\n :type resource_type: str or\n ~azure.mgmt.recoveryservicesbackup.models.DataSourceType\n :param resource_id: ARM Virtual Machine Id\n :type resource_id: str\n :param vault_id: Specifies the arm resource id of the vault\n :type vault_id: str\n :param properties: Configuration of VM if any needs to be validated like\n OS type etc\n :type properties: str\n \"\"\"\n\n _attribute_map = {\n 'resource_type': {'key': 'resourceType', 'type': 'str'},\n 'resource_id': {'key': 'resourceId', 'type': 'str'},\n 'vault_id': {'key': 'vaultId', 'type': 'str'},\n 'properties': {'key': 'properties', 'type': 'str'},\n }\n\n def __init__(self, **kwargs):\n super(PreValidateEnableBackupRequest, self).__init__(**kwargs)\n self.resource_type = kwargs.get('resource_type', None)\n self.resource_id = kwargs.get('resource_id', None)\n self.vault_id = kwargs.get('vault_id', None)\n self.properties = kwargs.get('properties', None)\n", "sub_path": "sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/pre_validate_enable_backup_request.py", "file_name": "pre_validate_enable_backup_request.py", "file_ext": "py", "file_size_in_byte": 2153, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "msrest.serialization.Model", "line_number": 15, "usage_type": "name"}]} {"seq_id": "488905886", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('newshub', '0021_society'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='society',\n old_name='administrators',\n new_name='admins',\n ),\n ]\n", "sub_path": "newshub/migrations/0022_auto_20160112_1929.py", "file_name": "0022_auto_20160112_1929.py", "file_ext": "py", "file_size_in_byte": 385, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.RenameField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}]} {"seq_id": "418627852", "text": "\"\"\"\nCopyright (c) 2018 Mejbah ul Alam, Justin Gottschlich, Abdullah Muzahid\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"\n\n\"\"\"\nFile name: autoperf.py\nFile description: Main script for training models and test AutoPerf\n\"\"\"\n\nimport sys\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg') #instead of Xserver for png\nimport matplotlib.pyplot as plt\nimport pprint\nimport numpy as np\nimport plot_utils\nimport os\nimport errno\nimport configs\nfrom utils import *\nfrom random import shuffle\nimport keras_autoencoder\nimport copy\nfrom collections import namedtuple\n\n\n\"\"\"\nDefintions of tuples used by this script\n\"\"\"\nAnomalyTuple = namedtuple('AnomalyTuple', 'run, sample_count, anomalous_sample_count, ranking')\nAccuracyTuple = namedtuple('AccuracyTuple', 'true_positive, false_negative, false_positive, true_negative')\n\n\n\"\"\"\nCreate list of sample with all the counter values from profile data\n\"\"\"\ndef getPerfDataset( dirName , numberOfCounters ):\n datasetHeader = []\n dataset = []\n eventID = 0\n for i in range(0, numberOfCounters):\n #if i==2 or i==15 or i==16 :\n # continue #TODO: 2 counters are not set in PAPI, temp fix , remove this once problem is resolved\n\n filename = dirName + \"/event_\" + str(eventID) + \"_perf_data.csv\"\n eventID += 1\n with open(filename, 'r') as fp:\n for linenumber,line in enumerate(fp):\n if linenumber == 2:\n headers = line.strip().split(\",\") #last one is the counter, 1 and 2 is thd id and instcouunt , 0 is mark id\n datasetHeader.append(headers[-1])\n if linenumber > 2:\n perfCounters = line.strip().split(\",\")\n mark = int(perfCounters[0])\n threadCount = int(perfCounters[1])\n instructionCount = int(perfCounters[2])\n currCounter = int(perfCounters[3])\n\n normalizedCounter = ( currCounter / ( instructionCount * threadCount ) )* configs.SCALE_UP_FACTOR\n if i==0:\n newSample = []\n newSample.append(normalizedCounter)\n dataset.append(newSample)\n dataset[linenumber-3].append(normalizedCounter)\n\n return datasetHeader, dataset\n\n\n\"\"\"\nConvert list to numpy array\n\"\"\"\ndef getDatasetArray( dataset ):\n\n dataArray = np.array(dataset, dtype='float32')\n return dataArray\n\n\n\"\"\"\nError function definition\n\"\"\"\ndef getMSE(old, new):\n squaredError = ((old - new) ** 2)\n return np.mean(squaredError)\n #return mean_squared_error(old,new)\n\n\n\"\"\"\nDistance calculation between reconstructed and original inputs\n\"\"\"\ndef getNormalizedDistance( old, new ):\n dist = np.linalg.norm(old-new)\n origin = np.linalg.norm(old)\n return dist/origin\n\n\n\"\"\"\nFunction for ranking of anomalies\nin : list of tuple of (event,error)\nout : ranking and majority voting update\n\"\"\"\ndef rankAnomalousPoint( sampleErrorsTuple, rankingMap ):\n\n sampleErrorsTuple.sort(key=lambda tup : tup[1], reverse=True)\n for index, errorTuple in enumerate(sampleErrorsTuple):\n\t rankingMap[errorTuple[0]][index] += 1\n\n return rankingMap\n\n\n\"\"\"\nReport ranking based on majority voting result\nin : rankingMap -- key: counter_name, val: vote count for each pos/rank\nout: list of rank for the entire execution anomaly root cause\n\"\"\"\ndef reportRanks( rankingMap ) :\n\n anomalousCounterRank = []\n for i in range(configs.NUMBER_OF_COUNTERS): ##for each possible ranks\n maxVote = 0\n maxkey = \"None\"\n for key in rankingMap:\n if rankingMap[key][i] > maxVote:\n maxVote = 
rankingMap[key][i]\n maxkey = key\n\n anomalousCounterRank.append(maxkey)\n\n return anomalousCounterRank\n\n\n\"\"\"\nCalculate reconstruction error : normalized distance\n\"\"\"\ndef detectAnomalyPoints( realData, predictedData, datasetHeader, thresholdLoss, outFile=None ):\n datasetLen = realData.shape[0]\n dataLen = realData.shape[1]\n anomalyCount = 0\n\n \"\"\"\n map for ranking :\n key = counter name, val = list of len(NUMBER_OF_COUNTERS)\n each pos corresponds to rank, smaller is better\n \"\"\"\n rankingMap = {}\n for counterName in datasetHeader:\n rankingMap[counterName] = [0] * configs.NUMBER_OF_COUNTERS\n\n reconstructErrorList = []\n for x in range(datasetLen):\n #reconstructionError = getNormalizedDistance( realData[x], predictedData[x] )\n reconstructionError = getMSE( realData[x], predictedData[x] )\n #for debugging\n reconstructErrorList.append(reconstructionError)\n #end for debugging\n #if(reconstructionError > configs.THRESHOLD_ERROR):\n if(reconstructionError > thresholdLoss):\n anomalyCount += 1\n errorList = [] #for ranking\n for y in range(0, dataLen):\n dist=abs(predictedData[x][y]-realData[x][y]) / realData[x][y]\n #collect errors and counter tuple\n errorList.append( (datasetHeader[y], dist) )\n #update ranking\n rankingMap = rankAnomalousPoint(errorList, rankingMap)\n\n votingResult = reportRanks( rankingMap )\n\n return datasetLen, anomalyCount, votingResult\n\n\n\n\"\"\"\nCalculate a threshold based on distribution of reconstrucion errors\n\"\"\"\ndef getReconstructionErrorThreshold( model, perfTestDataDir, runs ):\n\n errors = []\n for run in runs:\n print((\"Testing with\", run))\n datadir = perfTestDataDir + \"/\" + run\n datasetHeader, dataset = getPerfDataset( datadir , configs.NUMBER_OF_COUNTERS )\n dataArray = getDatasetArray(dataset)\n realData = preprocessDataArray(dataArray)\n predictedData = model.predict(realData)\n reconstructErrorList = []\n datasetLen = realData.shape[0]\n for x in range(datasetLen):\n #reconstructionError = getNormalizedDistance( realData[x], predictedData[x] )\n reconstructionError = getMSE( realData[x], predictedData[x] )\n reconstructErrorList.append(reconstructionError)\n\n errors.append(np.mean(reconstructErrorList) + np.std(reconstructErrorList))\n\n\n return (np.mean(errors) + np.std(errors))\n\n\n\"\"\"\nRun inference with dataset for test runs(executions) in the Datadir, write results in outFile\nand return number of runs found as anomalous\n\"\"\"\ndef testAutoencoder( model, perfTestDataDir, runs, outFile, threshold_error ):\n anomalousRunCount = 0\n for run in runs:\n print((\"Testing with\", run))\n datadir = perfTestDataDir + \"/\" + run\n\n datasetHeader, dataset = getPerfDataset( datadir , configs.NUMBER_OF_COUNTERS )\n dataArray = getDatasetArray(dataset)\n dataArray = preprocessDataArray(dataArray)\n reconstructedData, anomalyCount = runTrainedAutoencoder( model, dataArray, datasetHeader, threshold_error, outFile )\n if anomalyCount > 0 :\n\t print(\"Anomaly found in execution \", run, \" in\", anomalyCount, \" samples\", file=outFile)\n\t anomalousRunCount += 1\n\n return anomalousRunCount\n\n\n\"\"\"\nReturn list of tuple(run,total_sample, anomalous_sample, ranking)\n\"\"\"\ndef testAnomaly( model, testDataDir, runs, threshold_error ):\n test_summary = []\n for run in runs:\n datadir = testDataDir + \"/\" + run\n datasetHeader, dataset = getPerfDataset( datadir , configs.NUMBER_OF_COUNTERS )\n dataArray = getDatasetArray(dataset)\n dataArray = preprocessDataArray(dataArray)\n decoded_data = 
keras_autoencoder.predict( model, dataArray )\n datasetLen, anomalyCount, ranking = detectAnomalyPoints(dataArray, decoded_data, datasetHeader, threshold_error)\n if anomalyCount > datasetLen * configs.PERCENT_SAMPLE_FOR_ANOMALY : ##TODO: use a thresold, small % of anomaly can me ignored\n test_summary.append(AnomalyTuple(run = run, sample_count=datasetLen, anomalous_sample_count=anomalyCount, ranking = ranking))\n return test_summary\n\n\n\"\"\"\nRun trained autoencoder and detect anomalous samples based on 'thresoldLoss'\nand write output in 'outFile'\n\"\"\"\ndef runTrainedAutoencoder( model, testDataArray, datasetHeader, thresholdLoss, outFile ):\n\n decoded_data = keras_autoencoder.predict( model, testDataArray )\n dataLen, anomalyCount, ranking = detectAnomalyPoints(testDataArray, decoded_data, datasetHeader, thresholdLoss)\n\n return decoded_data, anomalyCount\n\n\n\"\"\"\nPreprocessing of data\n\"\"\"\ndef preprocessDataArray( dataset ):\n #zero centering\n #mean_vector = np.mean(dataset, axis=0)\n #processed_dataset = dataset - mean_vector\n\n #normalize\n #dataset /= np.std(dataset, axis=0)\n\n ##normalize with max in each column : not working some 0 values?? why not avoid zeros\n datasetMax = np.max(dataset, axis=0)\n processed_dataset = np.nan_to_num(np.true_divide(dataset,datasetMax))\n\n return processed_dataset\n\n\n\"\"\"\nUtil function for aggregating data files from all profile runs\n\"\"\"\ndef getTrainDataSequence(dataDir, testDir=None, validationDir=None ):\n runs = os.listdir(dataDir) ##no sequence of directory assigned, it should not matter\n\n print((\"Total execution/directory found for training data:\" , len(runs)))\n return runs\n\n\n\"\"\"\nUtil function for analyzing dataset\n\"\"\"\ndef analyzeVariationInData( dataDir, testDir=None, validationDir=None ):\n\n runs = os.listdir(dataDir)\n results = []\n baseDataset = None\n for counter, run in enumerate(runs):\n datadir = dataDir + \"/\" + run\n datasetHeader, dataset = getPerfDataset( datadir , configs.NUMBER_OF_COUNTERS )\n if counter == 0:\n baseDataset = getDatasetArray(dataset)\n results.append( (run,0))\n else:\n if counter < 50:\n #results.append(compareDataset(baseDataset, getDatasetArray(dataset)))\n results.append(( run, compareDataset(baseDataset, getDatasetArray(dataset))))\n\n testResults = []\n if testDir != None:\n runs = os.listdir(testDir)\n for counter, run in enumerate(runs):\n datadir = dataDir + \"/\" + run\n datasetHeader, dataset = getPerfDataset( datadir, configs.NUMBER_OF_COUNTERS )\n testResults.append(compareDataset( baseDataset, getDatasetArray(dataset)))\n\n validationResults = []\n if validationDir != None:\n runs = os.listdir(validationDir)\n for counter, run in enumerate(runs):\n datadir = dataDir + \"/\" + run\n datasetHeader, dataset = getPerfDataset( datadir, configs.NUMBER_OF_COUNTERS )\n validationResults.append(compareDataset( baseDataset, getDatasetArray(dataset)))\n\n sorted_results = sorted(results, key=lambda tup: tup[1], reverse=True)\n return sorted_results, testResults, validationResults\n\n\n\"\"\"\nI/O util for writing log of results\n\"\"\"\ndef writeTestLog(logFile, anomalySummary):\n for tuple in anomalySummary:\n print(tuple.run, tuple.sample_count, tuple.anomalous_sample_count, file=logFile)\n ## AnomalyTuple(run = run, sample_count=dataLen, anomalous_sample_count=anomalyCount, ranking = ranking)\n\n\n\"\"\"\nInferencing of model and determination of anomalies\n\"\"\"\ndef testModel( model, threshold_error, nonAnomalousDataDir, anomalousDataDir, logFile=None 
):\n print(\"..Testing Non-anomalous\")\n negative_runs = os.listdir(nonAnomalousDataDir)\n anomaly_summary = testAnomaly( model, nonAnomalousDataDir, negative_runs, threshold_error )\n if logFile != None:\n print(\"\\n..Testing nonAnomalousData..\\n\", file=logFile)\n writeTestLog(logFile, anomaly_summary)\n false_positive = len(anomaly_summary)\n true_negative = len(negative_runs) - false_positive\n print(\"..Testing Anomalous\")\n positive_runs = os.listdir(anomalousDataDir)\n anomaly_summary = testAnomaly( model, anomalousDataDir, positive_runs, threshold_error )\n if logFile != None:\n print(\"\\n..Testing AnomalousData..\\n\", file=logFile)\n writeTestLog(logFile, anomaly_summary)\n true_positive = len(anomaly_summary)\n false_negative = len(positive_runs) - true_positive\n return AccuracyTuple(true_positive=true_positive, false_negative = false_negative, false_positive=false_positive, true_negative=true_negative)\n\n\n\"\"\"\nFunction for gathering statistics on accuracy of a model for a specific test dataset and validation dataset\n\"\"\"\ndef testModelAccuracy( model, outFilename, threshold_error, perfTestDataDir, perfValidDataDir ):\n\n outFile = open(outFilename, 'w')\n ## test for anomalous dataset\n runs = os.listdir(perfTestDataDir)\n\n\n anomalousRunCount = testAutoencoder( model, perfTestDataDir, runs, outFile, threshold_error )\n\n truePositive = anomalousRunCount ## a\n falseNegative = len(runs) - anomalousRunCount ## b\n print(\"Actual Positive\", len(runs), file=outFile)\n print(\"True Positive\", anomalousRunCount, file=outFile)\n print(\"False Negative\", len(runs) - anomalousRunCount, file=outFile)\n\n print((\"Total run \", len(runs)))\n print((\"Total anomalous run found \", anomalousRunCount))\n\n\n ##validation with correct(not anomalous) dataset\n runs = os.listdir(perfValidDataDir)\n\n anomalousRunCount = testAutoencoder( model, perfValidDataDir, runs, outFile, threshold_error )\n\n trueNegative = len(runs) - anomalousRunCount ## d\n falsePositive = anomalousRunCount ## c\n print(\"Actual Negative\", len(runs), file=outFile)\n print(\"True Negative\", len(runs) - anomalousRunCount, file=outFile)\n print(\"False Positive\", anomalousRunCount, file=outFile)\n\n print((\"Total run \", len(runs)))\n print((\"Total anomalous run found \", anomalousRunCount))\n\n ##calculate F score\n precision = 0\n if truePositive+falsePositive != 0:\n precision = float(truePositive)/(truePositive+falsePositive)\n\n recall = float(truePositive)/(truePositive+falseNegative)\n fscore = 0\n if precision + recall != 0:\n fscore = 2* (precision*recall)/(precision+recall) ##harmonic mean of precision and recall\n\n print(\"Precision\" , precision, file=outFile)\n print(\"Recall\", recall, file=outFile)\n print(\"Fscore\", fscore, file=outFile)\n outFile.close()\n\n\n print((\"Report: \", outFilename))\n\n\n\n\"\"\"\nAggregate all data and train with all data at once\n\"\"\"\n\ndef perfAnalyzerMainTrain( perfTrainDataDir, outputDir, autoencoder, threshold_final=None , saveTrainedNetwork=False):\n\n model = None\n\n training_sequence = getTrainDataSequence(perfTrainDataDir)\n train_loss_list = []\n reconstruction_error_list = []\n validation_loss_list = []\n dataset = []\n for train_run in training_sequence:\n datadir = perfTrainDataDir + \"/\" + train_run\n redundantHeader, additionalDatatset = getPerfDataset( datadir , configs.NUMBER_OF_COUNTERS )\n processed_dataset_array = preprocessDataArray(getDatasetArray(additionalDatatset))\n dataset.extend(processed_dataset_array.tolist())\n\n if 
len(dataset) < configs.NUMBER_OF_COUNTERS * 2:\n        print(\"Not enough data for training this iteration\")\n        sys.exit(1)\n    else:\n        print((\"Training dataset size\", len(dataset)))\n        trainingDataset = getDatasetArray(dataset)\n        if model == None:\n            model, train_loss, validation_loss = keras_autoencoder.trainAutoencoder( autoencoder, trainingDataset )\n            train_loss_list.extend(train_loss)\n            validation_loss_list.extend(validation_loss)\n        else:\n            model, train_loss, validation_loss = keras_autoencoder.trainAutoencoder( model, trainingDataset )\n            train_loss_list.extend(train_loss)\n            validation_loss_list.extend(validation_loss)\n    if saveTrainedNetwork == True :\n        ## fixed: the original appended \"_\" + str(i) to the file name, but no index i exists in this function (NameError)\n        model.save(outputDir + \"/\" + configs.MODEL_SAVED_FILE_NAME)\n\n    return train_loss_list, validation_loss_list\n\n\n\"\"\"\nTrain data with one or more execution runs in one batch\n\"\"\"\ndef perfAnalyzerMainTrainSequence( perfTrainDataDir, outputDir, autoencoder, threshold_final=None , saveTrainedNetwork=False):\n\n    model = None\n\n    training_sequence = getTrainDataSequence(perfTrainDataDir)\n    #for epoch in configs.EXPERIMENT_EPOCHS:\n    train_loss_list = []\n    reconstruction_error_list = []\n    validation_loss_list = []\n    #for train_run in training_sequence:\n    i = 0\n    while i < len(training_sequence):\n        train_run = training_sequence[i]\n        print((\"Training with\", train_run))\n        datadir = perfTrainDataDir + \"/\" + train_run\n\n        datasetHeader, dataset = getPerfDataset( datadir , configs.NUMBER_OF_COUNTERS )\n\n        ## fixed: the original inner loop ran while i < len(training_sequence), which pulled every\n        ## remaining run into one batch; only keep adding runs while the current batch is too small\n        while len(dataset) < configs.NUMBER_OF_COUNTERS * 2:\n            print (\"small input, adding more data for the batch\")\n            if(i+1 == len(training_sequence)):\n                break ## handle error\n            print((\"adding \", training_sequence[i+1]))\n            datadir = perfTrainDataDir + \"/\" + training_sequence[i+1]\n            i += 1\n            redundantHeader, additionalDatatset = getPerfDataset( datadir , configs.NUMBER_OF_COUNTERS )\n            dataset.extend(additionalDatatset)\n        i += 1\n        dataArray = getDatasetArray(dataset)\n        trainingDataset = preprocessDataArray(dataArray)\n\n        if len(dataset) < configs.NUMBER_OF_COUNTERS * 2:\n            print(\"Not enough data for training this iteration\")\n            break\n        if model == None:\n            model, train_loss, validation_loss = keras_autoencoder.trainAutoencoder( autoencoder, trainingDataset)\n            train_loss_list.extend(train_loss)\n            validation_loss_list.extend(validation_loss)\n        else:\n            model, train_loss, validation_loss = keras_autoencoder.trainAutoencoder( model, trainingDataset )\n            train_loss_list.extend(train_loss)\n            validation_loss_list.extend(validation_loss)\n        if saveTrainedNetwork == True :\n            model.save(outputDir + \"/\" + configs.MODEL_SAVED_FILE_NAME + \"_\"+ str(i))\n\n    return train_loss_list, validation_loss_list\n\n\n\"\"\"\nUtil function for determining different topologies\n\"\"\"\ndef getRangeOfNode( n ):\n\n    list_of_numbers = []\n    for i in range( int(n/4), int(3*n/4) ):\n        if i > 5: #TODO: limit ???\n            list_of_numbers.append(i)\n    return list_of_numbers\n\n\n
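\"\"\"\nEditor's addition (hypothetical helper, not called by the pipeline): a tiny,\nself-contained illustration of the precision/recall/F-score arithmetic used in\ntestModelAccuracy above. The sample numbers in the trailing comment are made up.\n\"\"\"\ndef _examplePrecisionRecallFscore( true_positive, false_positive, false_negative ):\n\n    precision = 0\n    if true_positive + false_positive != 0:\n        precision = float(true_positive)/(true_positive + false_positive)\n\n    recall = 0\n    if true_positive + false_negative != 0:\n        recall = float(true_positive)/(true_positive + false_negative)\n\n    fscore = 0\n    if precision + recall != 0:\n        fscore = 2 * (precision*recall)/(precision + recall) ##harmonic mean of precision and recall\n    return precision, recall, fscore\n\n## e.g. _examplePrecisionRecallFscore(8, 2, 2) returns (0.8, 0.8, 0.8)\n\n\n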
\"\"\"\nRun inference on input data and calculate reconstruction error\n@return : list of reconstruction errors\n\"\"\"\ndef getReconstructionErrors( perfTestDataDir, model ):\n\n    runs = os.listdir(perfTestDataDir)\n    reconstructErrorList = []\n    for run in runs:\n        #print (\"Reconstruction of execution \", run)\n        datadir = perfTestDataDir + \"/\" + run\n\n        datasetHeader, dataset = getPerfDataset( datadir , configs.NUMBER_OF_COUNTERS )\n        dataArray = getDatasetArray(dataset)\n        dataArray = preprocessDataArray(dataArray)\n\n        datasetLen = dataArray.shape[0]\n        decodedData = keras_autoencoder.predict( model, dataArray )\n        for x in range(datasetLen):\n            #reconstructionError = getNormalizedDistance( realData[x], predictedData[x] )\n            reconstructionError = getMSE( dataArray[x], decodedData[x] )\n            #for debugging\n            reconstructErrorList.append(reconstructionError)\n    return reconstructErrorList\n\n\n\"\"\"\nAggregate all data and train with all data at once\n@return trained model\n\"\"\"\n\ndef aggregateAndTrain( perfTrainDataDir, autoencoder, saveTrainedNetwork=False,outputDir=None ):\n\n    training_sequence = getTrainDataSequence(perfTrainDataDir)\n    train_loss_list = []\n    reconstruction_error_list = []\n    validation_loss_list = []\n    dataset = []\n    for train_run in training_sequence:\n        datadir = perfTrainDataDir + \"/\" + train_run\n        redundantHeader, additionalDatatset = getPerfDataset( datadir , configs.NUMBER_OF_COUNTERS )\n        processed_dataset_array = preprocessDataArray(getDatasetArray(additionalDatatset))\n        dataset.extend(processed_dataset_array.tolist())\n\n    if len(dataset) < configs.NUMBER_OF_COUNTERS * 2:\n        print(\"Not enough data for training this iteration\")\n        sys.exit(1)\n    else:\n        print((\"Training dataset size\", len(dataset)))\n        trainingDataset = getDatasetArray(dataset)\n        model, train_loss, validation_loss = keras_autoencoder.trainAutoencoder( autoencoder, trainingDataset )\n        train_loss_list.extend(train_loss)\n        validation_loss_list.extend(validation_loss)\n\n    if saveTrainedNetwork == True :\n        ## fixed: the original appended \"_\" + str(i) to the file name, but no index i exists in this function (NameError)\n        model.save(outputDir + \"/\" + configs.MODEL_SAVED_FILE_NAME)\n\n    return model\n\n\n\"\"\"\nCalculates threshold of reconstruction errors = mean + 3 * std of reconstruction errors\n\"\"\"\ndef calcThresoldError(reconstructionErrors):\n    meanVal = np.mean(reconstructionErrors)\n    meanVal += ( 3 * np.std(reconstructionErrors))\n    return meanVal\n\n\n\"\"\"\nTrain and test one autoencoder model with the datasets stored in the given directories\n\"\"\"\ndef trainAndTest( autoencoder, trainDataDir, nonAnomalousTestDir, anomalousTestDataDir, logFile=None ):\n    model = aggregateAndTrain( trainDataDir, autoencoder )\n\n    print(\"..Training Complete\")\n    datasetTrainErrors = getReconstructionErrors(trainDataDir, model)\n    threshold_error = calcThresoldError(datasetTrainErrors)\n\n    print(\"..Threshold determined\")\n    print(\"Threshold : \", threshold_error, file=logFile)\n\n    test_result = testModel( model, threshold_error, nonAnomalousTestDir, anomalousTestDataDir, logFile )\n\n    model_string = keras_autoencoder.getAutoendoerShape( model )\n\n    return (model_string, threshold_error, test_result)\n\n\n\"\"\"\nCreate autoencoders from lists of input length and array of layers\n\"\"\"\ndef getTopologies( inputLen, numberOfLayers ):\n\n    candidate_autoencoders = []\n    prefix = []\n    for layer in range(numberOfLayers):\n        number_of_prefix = 1\n        if layer > 0:\n            number_of_prefix = len(prefix)\n        for prefix_layer in range(number_of_prefix):\n            curr_layer_nodes = 1\n            if layer == 0:\n                curr_layer_nodes = getRangeOfNode(inputLen)\n            else:\n                curr_layer_nodes = getRangeOfNode(prefix[prefix_layer][-1])\n            for n in curr_layer_nodes:\n                new_prefix = []\n                if layer > 0:\n                    for nodes in prefix[prefix_layer]:\n                        new_prefix.append(nodes)\n\n                #print \"curr network: \", inputLen, new_prefix, n\n                autoencoder = keras_autoencoder.getAutoencoder(inputLen, n, new_prefix)\n                candidate_autoencoders.append(autoencoder)\n                new_prefix.append(n) #add new layer nodes\n                prefix.append(new_prefix) #store prefix in this layer\n\n        prefix = prefix[number_of_prefix-1:]\n\n    return candidate_autoencoders\n\n\n
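## Editor's sketch (hypothetical usage; every call below already exists in this file):\n## how the helpers above compose into a minimal train-and-threshold run.\n## The directory name is a placeholder.\n##\n##   autoencoder = keras_autoencoder.getAutoencoder(configs.NUMBER_OF_COUNTERS, 8, [16])\n##   model = aggregateAndTrain(\"path/to/train\", autoencoder)\n##   threshold = calcThresoldError(getReconstructionErrors(\"path/to/train\", model))\n\n\n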
\"\"\"\nTrain and test provided list of candidate_autoencoders\nor search all possible networks in range of NUMBER_OF_HIDDEN_LAYER_TO_SEARCH\n\"\"\"\ndef AutoPerfMain(candidate_autoencoders=None):\n    trainDataDir=sys.argv[1]\n    nonAnomalousTestDataDir=sys.argv[2]\n    anomalousTestDataDir=sys.argv[3]\n    outputDir = sys.argv[4]\n    mkdir_p(outputDir)\n\n    if candidate_autoencoders == None :\n        candidate_autoencoders = getTopologies( configs.NUMBER_OF_COUNTERS, configs.NUMBER_OF_HIDDEN_LAYER_TO_SEARCH )\n\n    out_file = open(outputDir + \"/autoperf.out\", 'w')\n    log_file = open(outputDir + \"/autoperf.log\",'w')\n    print(\"Number of autoencoder topologies:\", len(candidate_autoencoders), file=log_file)\n\n    print(\"network, error, true_positive, false_negative, true_negative, false_positive\", file=out_file)\n\n    print(\"Train: \", trainDataDir, file=log_file)\n    print(\"Test(nonAnomalous): \", nonAnomalousTestDataDir, file=log_file)\n    print(\"Test(Anomalous): \", anomalousTestDataDir, file=log_file)\n    out_file.close()\n    log_file.close()\n\n    for i in range(len(candidate_autoencoders)):\n        out_file = open(outputDir + \"/autoperf.out\", 'a')\n        log_file = open(outputDir + \"/autoperf.log\", 'a')\n        print((\"..Autoencoder topology: \", keras_autoencoder.getAutoendoerShape(candidate_autoencoders[i])))\n        print(\"\\n..Autoencoder topology: \", keras_autoencoder.getAutoendoerShape(candidate_autoencoders[i]), file=log_file)\n        output = trainAndTest( candidate_autoencoders[i], trainDataDir, nonAnomalousTestDataDir, anomalousTestDataDir, log_file )\n\n        ##output -> (model_string, threshold_error, test_result)\n        print(output[0], output[1], output[2].true_positive, output[2].false_negative, output[2].true_negative, output[2].false_positive, file=out_file)\n        out_file.close()\n        log_file.close()\n\n\n    print((\"..Output to file \", outputDir+\"/autoperf.out\"))\n    print((\"..Log file \", outputDir+\"/autoperf.log\"))\n\n\n\n\nif __name__ == \"__main__\" :\n\n    if(len(sys.argv) < 5): ## fixed: the script reads sys.argv[1..4], so fewer than five entries is an error (was < 4)\n        print(\"Usage: autoperf.py path/to/trainingdata path/to/nonAnomalousTestData path/to/anomalousTestData path/to/output\")\n        sys.exit()\n\n    input_dim = configs.NUMBER_OF_COUNTERS\n    #hidden_dims = [[ 16, 8 ], [16], [8], [16,8,4]]\n    #encoding_dims = [4, 8, 4, 2]\n    hidden_dims = [[ 16 ]]\n    encoding_dims = [8]\n\n\n    candidate_autoencoders = []\n    for hidden_dim, encoding_dim in zip(hidden_dims, encoding_dims):\n        autoencoder = keras_autoencoder.getAutoencoder(input_dim, encoding_dim, hidden_dim)\n        candidate_autoencoders.append(autoencoder)\n    AutoPerfMain(candidate_autoencoders)\n    #AutoPerfMain()\n", "sub_path": "autoperf/autoperf.py", "file_name": "autoperf.py", "file_ext": "py", "file_size_in_byte": 24688, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "matplotlib.use", "line_number": 20, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 38, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 39, "usage_type": "call"}, {"api_name": "configs.SCALE_UP_FACTOR", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 99, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 100, "usage_type": "attribute"}, {"api_name": 
"configs.NUMBER_OF_COUNTERS", "line_number": 126, "usage_type": "attribute"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 154, "usage_type": "attribute"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 189, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 203, "usage_type": "call"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 216, "usage_type": "attribute"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 234, "usage_type": "attribute"}, {"api_name": "keras_autoencoder.predict", "line_number": 237, "usage_type": "call"}, {"api_name": "configs.PERCENT_SAMPLE_FOR_ANOMALY", "line_number": 239, "usage_type": "attribute"}, {"api_name": "keras_autoencoder.predict", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.true_divide", "line_number": 269, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 278, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 289, "usage_type": "call"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 294, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 305, "usage_type": "call"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 308, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 313, "usage_type": "call"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 316, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 337, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 345, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 362, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 378, "usage_type": "call"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 426, "usage_type": "attribute"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 430, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 432, "usage_type": "call"}, {"api_name": "keras_autoencoder.trainAutoencoder", "line_number": 437, "usage_type": "call"}, {"api_name": "keras_autoencoder.trainAutoencoder", "line_number": 441, "usage_type": "call"}, {"api_name": "configs.MODEL_SAVED_FILE_NAME", "line_number": 445, "usage_type": "attribute"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 470, "usage_type": "attribute"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 480, "usage_type": "attribute"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 486, "usage_type": "attribute"}, {"api_name": "keras_autoencoder.trainAutoencoder", "line_number": 490, "usage_type": "call"}, {"api_name": "keras_autoencoder.trainAutoencoder", "line_number": 494, "usage_type": "call"}, {"api_name": "configs.MODEL_SAVED_FILE_NAME", "line_number": 498, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 521, "usage_type": "call"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 527, "usage_type": "attribute"}, {"api_name": "keras_autoencoder.predict", "line_number": 532, "usage_type": "call"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 556, "usage_type": "attribute"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 560, "usage_type": 
"attribute"}, {"api_name": "sys.exit", "line_number": 562, "usage_type": "call"}, {"api_name": "keras_autoencoder.trainAutoencoder", "line_number": 566, "usage_type": "call"}, {"api_name": "configs.MODEL_SAVED_FILE_NAME", "line_number": 571, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 580, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 581, "usage_type": "call"}, {"api_name": "keras_autoencoder.getAutoendoerShape", "line_number": 600, "usage_type": "call"}, {"api_name": "keras_autoencoder.getAutoencoder", "line_number": 631, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 648, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 649, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 650, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 651, "usage_type": "attribute"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 655, "usage_type": "attribute"}, {"api_name": "configs.NUMBER_OF_HIDDEN_LAYER_TO_SEARCH", "line_number": 655, "usage_type": "attribute"}, {"api_name": "keras_autoencoder.getAutoendoerShape", "line_number": 672, "usage_type": "call"}, {"api_name": "keras_autoencoder.getAutoendoerShape", "line_number": 673, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 690, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 692, "usage_type": "call"}, {"api_name": "configs.NUMBER_OF_COUNTERS", "line_number": 694, "usage_type": "attribute"}, {"api_name": "keras_autoencoder.getAutoencoder", "line_number": 703, "usage_type": "call"}]} {"seq_id": "474387356", "text": "\"\"\"UI Coverage for a CFME/MIQ Appliance\n\nUsage\n-----\n\n``py.test --ui-coverage``\n\nGeneral Notes\n-------------\nsimplecov can merge test results, but doesn't appear to like working in a\nmulti-process environment. Specifically, it clobbers its own results when running\nsimultaneously in multiple processes. To solve this, each process records its\noutput to its own directory (configured in coverage_hook). All of the\nindividual process' results are then manually merged (coverage_merger) into one\nbig json result, and handed back to simplecov which generates the compiled html\n(for humans) and rcov (for jenkins) reports.\n\nthing_toucher makes a best-effort pass at requiring all of the ruby files in\nthe rails root, as well as any external MIQ libs/utils outside of the rails\nroot (../lib and ../lib/util). This makes sure files that are never\nrequired still show up in the coverage report.\n\nWorkflow Overview\n-----------------\n\nPre-testing (``pytest_configure`` hook):\n\n1. Add ``Gemfile.dev.rb`` to the rails root, then run bundler to install simplecov\n and its dependencies.\n2. Install and require the coverage hook (copy ``coverage_hook`` to config/, add\n require line to the end of ``config/boot.rb``)\n3. Restart EVM (Rudely) to start running coverage on the appliance processes:\n ``killall -9 ruby; service evmserverd start``\n4. TOUCH ALL THE THINGS (run ``thing_toucher.rb`` with the rails runner).\n Fork this process off and come back to it later\n\nPost-testing (``pytest_unconfigure`` hook):\n\n1. Poll ``thing_toucher`` to make sure it completed; block if needed.\n2. Stop EVM, but nicely this time so the coverage atexit hooks run:\n ``service evmserverd stop``\n3. Run ``coverage_merger.rb`` with the rails runner, which compiles all the individual process\n reports and runs coverage again, additionally creating an rcov report\n4. 
Pull the coverage dir back for parsing and archiving\n5. For fun: Read the results from ``coverage/.last_run.json`` and print it to the test terminal/log\n\nPost-testing (e.g. ci environment):\n1. Use the generated rcov report with the ruby stats plugin to get a coverage graph\n2. Zip up and archive the entire coverage dir for review\n\n\"\"\"\nimport json\nimport os\nfrom glob import glob\n\nfrom py.error import ENOENT\nfrom py.path import local\n\nfrom fixtures.pytest_store import store\nfrom utils.log import logger\nfrom utils.path import log_path, scripts_data_path\n\n# paths to all of the coverage-related files\n\n# on the appliance\n#: Corresponds to Rails.root in the rails env\nrails_root = local('/var/www/miq/vmdb')\n#: coverage root, should match what's in the coverage hook and merger scripts\nappliance_coverage_root = rails_root.join('coverage')\n\n# local\ncoverage_data = scripts_data_path.join('coverage')\ngemfile = coverage_data.join('Gemfile.dev.rb')\ncoverage_hook = coverage_data.join('coverage_hook.rb')\ncoverage_merger = coverage_data.join('coverage_merger.rb')\nthing_toucher = coverage_data.join('thing_toucher.rb')\ncoverage_output_dir = log_path.join('coverage')\n\n\ndef _thing_toucher_mp_handler(ssh_client):\n    # for use in a subprocess to kick off the thing toucher\n    x, out = ssh_client.run_rails_command('thing_toucher.rb')\n    return x\n\n\ndef clean_coverage_dir():\n    try:\n        coverage_output_dir.remove(ignore_errors=True)\n    except ENOENT:\n        pass\n    coverage_output_dir.ensure(dir=True)\n\n\nclass UiCoveragePlugin(object):\n    def pytest_configure(self, config):\n        if store.parallelizer_role != 'master':\n            store.current_appliance.install_coverage()\n\n        if store.parallelizer_role != 'slave':\n            clean_coverage_dir()\n\n    def pytest_sessionfinish(self, exitstatus):\n        # Now master/standalone needs to move all the reports to an appliance for the source report\n        if store.parallelizer_role != 'slave':\n            store.terminalreporter.write_sep('-', 'collecting coverage reports')\n        else:\n            store.slave_manager.message('collecting coverage reports')\n\n        if store.parallelizer_role != 'master':\n            store.current_appliance.collect_coverage_reports()\n\n        # for slaves, everything is done at this point\n        if store.parallelizer_role == 'slave':\n            return\n\n        # The rest should only happen in the master/standalone process\n        results_tgzs = glob(coverage_output_dir.join('*-coverage-results.tgz').strpath)\n        if not results_tgzs:\n            # Not sure if we should explode here or not.\n            logger.error('No coverage results collected')\n            store.terminalreporter.write_sep('=', 'No coverage results found', red=True)\n            return\n\n        # push the results to the appliance\n        ssh_client = store.current_appliance.ssh_client()\n        for results_tgz in results_tgzs:\n            dest_file = appliance_coverage_root.join(os.path.basename(results_tgz)).strpath\n            ssh_client.put_file(results_tgz, dest_file)\n            ssh_client.run_command('tar xvaf {} -C /var/www/miq/vmdb/coverage'.format(dest_file))\n\n        # run the merger on the appliance to generate the simplecov report\n        store.terminalreporter.write_sep('-', 'merging coverage reports')\n        ssh_client.put_file(coverage_merger.strpath, rails_root.strpath)\n        ssh_client.run_rails_command(coverage_merger.basename)\n\n        # Now bring the report back and write out the info\n        # TODO: We're already using tar, might as well tar this up, too.\n        ssh_client.get_file(\n            appliance_coverage_root.join('merged').strpath,\n            coverage_output_dir.strpath,\n            recursive=True\n        )\n\n    def pytest_unconfigure(self, config):\n        try:\n            last_run = 
json.load(log_path.join('coverage', 'merged', '.last_run.json').open())\n coverage = last_run['result']['covered_percent']\n # TODO: We don't currently know what a \"good\" coverage number is.\n style = {'bold': True}\n if coverage > 40:\n style['green'] = True\n else:\n style['red'] = True\n store.terminalreporter.line('UI Coverage Result: {}%'.format(coverage), **style)\n except KeyboardInterrupt:\n # don't block this, so users can cancel out\n raise\n except Exception as ex:\n logger.error('Error printing coverage report to terminal')\n logger.exception(ex)\n\n\ndef pytest_addoption(parser):\n group = parser.getgroup('cfme')\n group.addoption('--ui-coverage', dest='ui_coverage', action='store_true', default=False,\n help=\"Enable setup and collection of ui coverage on an appliance\")\n\n\ndef pytest_cmdline_main(config):\n # Only register the plugin worker if ui coverage is enabled\n if config.option.ui_coverage:\n config.pluginmanager.register(UiCoveragePlugin(), name=\"ui-coverage\")\n", "sub_path": "fixtures/ui_coverage.py", "file_name": "ui_coverage.py", "file_ext": "py", "file_size_in_byte": 6831, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "py.path.local", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.path.scripts_data_path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "utils.path.scripts_data_path", "line_number": 72, "usage_type": "name"}, {"api_name": "utils.path.log_path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "utils.path.log_path", "line_number": 77, "usage_type": "name"}, {"api_name": "py.error.ENOENT", "line_number": 89, "usage_type": "name"}, {"api_name": "fixtures.pytest_store.store.parallelizer_role", "line_number": 96, "usage_type": "attribute"}, {"api_name": "fixtures.pytest_store.store", "line_number": 96, "usage_type": "name"}, {"api_name": "fixtures.pytest_store.store.current_appliance.install_coverage", "line_number": 97, "usage_type": "call"}, {"api_name": "fixtures.pytest_store.store.current_appliance", "line_number": 97, "usage_type": "attribute"}, {"api_name": "fixtures.pytest_store.store", "line_number": 97, "usage_type": "name"}, {"api_name": "fixtures.pytest_store.store.parallelizer_role", "line_number": 99, "usage_type": "attribute"}, {"api_name": "fixtures.pytest_store.store", "line_number": 99, "usage_type": "name"}, {"api_name": "fixtures.pytest_store.store.parallelizer_role", "line_number": 104, "usage_type": "attribute"}, {"api_name": "fixtures.pytest_store.store", "line_number": 104, "usage_type": "name"}, {"api_name": "fixtures.pytest_store.store.terminalreporter.write_sep", "line_number": 105, "usage_type": "call"}, {"api_name": "fixtures.pytest_store.store.terminalreporter", "line_number": 105, "usage_type": "attribute"}, {"api_name": "fixtures.pytest_store.store", "line_number": 105, "usage_type": "name"}, {"api_name": "fixtures.pytest_store.store.slave_manager.message", "line_number": 107, "usage_type": "call"}, {"api_name": "fixtures.pytest_store.store.slave_manager", "line_number": 107, "usage_type": "attribute"}, {"api_name": "fixtures.pytest_store.store", "line_number": 107, "usage_type": "name"}, {"api_name": "fixtures.pytest_store.store.parallelizer_role", "line_number": 109, "usage_type": "attribute"}, {"api_name": "fixtures.pytest_store.store", "line_number": 109, "usage_type": "name"}, {"api_name": "fixtures.pytest_store.store.current_appliance.collect_coverage_reports", "line_number": 110, "usage_type": 
"call"}, {"api_name": "fixtures.pytest_store.store.current_appliance", "line_number": 110, "usage_type": "attribute"}, {"api_name": "fixtures.pytest_store.store", "line_number": 110, "usage_type": "name"}, {"api_name": "fixtures.pytest_store.store.parallelizer_role", "line_number": 113, "usage_type": "attribute"}, {"api_name": "fixtures.pytest_store.store", "line_number": 113, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 117, "usage_type": "call"}, {"api_name": "utils.log.logger.error", "line_number": 120, "usage_type": "call"}, {"api_name": "utils.log.logger", "line_number": 120, "usage_type": "name"}, {"api_name": "fixtures.pytest_store.store.terminalreporter.write_sep", "line_number": 121, "usage_type": "call"}, {"api_name": "fixtures.pytest_store.store.terminalreporter", "line_number": 121, "usage_type": "attribute"}, {"api_name": "fixtures.pytest_store.store", "line_number": 121, "usage_type": "name"}, {"api_name": "fixtures.pytest_store.store.current_appliance.ssh_client", "line_number": 125, "usage_type": "call"}, {"api_name": "fixtures.pytest_store.store.current_appliance", "line_number": 125, "usage_type": "attribute"}, {"api_name": "fixtures.pytest_store.store", "line_number": 125, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "fixtures.pytest_store.store.terminalreporter.write_sep", "line_number": 132, "usage_type": "call"}, {"api_name": "fixtures.pytest_store.store.terminalreporter", "line_number": 132, "usage_type": "attribute"}, {"api_name": "fixtures.pytest_store.store", "line_number": 132, "usage_type": "name"}, {"api_name": "json.load", "line_number": 146, "usage_type": "call"}, {"api_name": "utils.path.log_path.join", "line_number": 146, "usage_type": "call"}, {"api_name": "utils.path.log_path", "line_number": 146, "usage_type": "name"}, {"api_name": "fixtures.pytest_store.store.terminalreporter.line", "line_number": 154, "usage_type": "call"}, {"api_name": "fixtures.pytest_store.store.terminalreporter", "line_number": 154, "usage_type": "attribute"}, {"api_name": "fixtures.pytest_store.store", "line_number": 154, "usage_type": "name"}, {"api_name": "utils.log.logger.error", "line_number": 159, "usage_type": "call"}, {"api_name": "utils.log.logger", "line_number": 159, "usage_type": "name"}, {"api_name": "utils.log.logger.exception", "line_number": 160, "usage_type": "call"}, {"api_name": "utils.log.logger", "line_number": 160, "usage_type": "name"}]} {"seq_id": "169044576", "text": "import torch\nfrom .lowrank import LowRank\nfrom .exceptions import VectorError\n\n\ndef sigmoid(t):\n return 2.0 * (torch.sigmoid(t) - 0.5)\n\n\nclass AlmostOrthogonal(LowRank):\n fs = {\"sigmoid\": sigmoid, \"tanh\": torch.tanh, \"sin\": torch.sin}\n\n def __init__(self, size, lam, f=\"sin\", triv=\"expm\"):\n r\"\"\"\n Manifold of matrices with singular values in the interval :math:`(1-\\lambda, 1+\\lambda)`.\n\n The possible default maps are the :math:`\\sin,\\,\\tanh` functions and a rescaled\n sigmoid. The sigmoid is rescaled as :math:`\\operatorname{sigmoid}(x) = 2\\sigma(x) - 1`\n where :math:`\\sigma` is the usual sigmoid function.\n\n Args:\n size (torch.size): Size of the tensor to be applied to\n lam (float): Radius. A float in the interval [0, 1]\n f (str or callable): Optional. 
One of `[\"sigmoid\", \"tanh\", \"sin\"]`\n or a callable that maps real numbers to the interval [-1, 1].\n Default: `\"sin\"`\n triv (str or callable): Optional.\n A map that maps :math:`\\operatorname{Skew}(n)` onto the orthogonal\n matrices surjectively. This is used to optimize the Q in the eigenvalue\n decomposition. It can be one of `[\"expm\", \"cayley\"]` or a custom\n callable. Default: `\"expm\"`\n\n \"\"\"\n super().__init__(size, AlmostOrthogonal.rank(size), triv=triv)\n if f not in AlmostOrthogonal.fs.keys() and not callable(f):\n raise ValueError(\n \"Argument f was not recognized and is \"\n \"not callable. Should be one of {}. Found {}\".format(\n list(AlmostOrthogonal.fs.keys()), f\n )\n )\n\n if lam < 0.0 or lam > 1.0:\n raise ValueError(\"The radius has to be between 0 and 1. Got {}\".format(lam))\n\n if callable(f):\n self.f = f\n else:\n self.f = AlmostOrthogonal.fs[f]\n\n self.lam = lam\n\n @classmethod\n def rank(cls, size):\n if len(size) < 2:\n raise VectorError(cls.__name__, size)\n return min(*size[-2:])\n\n def fibration(self, X):\n U, S, V = X\n S = 1.0 + self.lam * self.f(S)\n return super().fibration((U, S, V))\n", "sub_path": "geotorch/almostorthogonal.py", "file_name": "almostorthogonal.py", "file_ext": "py", "file_size_in_byte": 2265, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "torch.sigmoid", "line_number": 7, "usage_type": "call"}, {"api_name": "lowrank.LowRank", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.tanh", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.sin", "line_number": 11, "usage_type": "attribute"}, {"api_name": "exceptions.VectorError", "line_number": 56, "usage_type": "call"}]} {"seq_id": "261472480", "text": "from django.shortcuts import render\nfrom django.views import View\nfrom django.http import JsonResponse,HttpResponseServerError,HttpResponseNotFound,HttpResponseBadRequest,HttpResponseRedirect\nimport json, pdb, re, datetime\nfrom pymongo import MongoClient\n\n# Create your views here.\n\nclass AccountsIndexView(AnyView):\n def __init__(self ,*args, **kwargs):\n # this is where the view gets initialized\n AnyView.__init__(self,*args, **kwargs)\n self.useraccounts = self.pifarm['useraccounts']\n def hateoas_replace_links(self, request,result):\n \"\"\"This is to transform the object to hateoas compliant links\"\"\"\n def replace_links(acc):\n if not acc is None and 'license' in acc:\n acc['license'] = request.build_absolute_uri('/accounts/{}/license'.format(acc['login']))\n if isinstance(result, list):\n for item in result:\n replace_links(item)\n else:\n replace_links(result)\n return result\n def accountok(self,acc):\n \"\"\"this is to assess the shape of the account object that is send in from the http client\n \"\"\"\n if acc is None or isinstance(acc, dict) ==False:\n return False\n if not 'login' in acc or not 'pin' in acc:\n return False\n if acc['login']=='' or acc['pin']=='':\n return False\n if re.match(r'[a-zA-Z0-9_-]{3,12}', acc['login']) is None:\n return False\n if re.match(r'[0-9]{3,12}', acc['pin']) is None:\n return False\n return True\n def get (self, request):\n \"\"\"this is to get the index of all the accounts from the database\"\"\"\n if not self.pifarm is None:\n try:\n result = list(self.useraccounts.find({}))\n if not result is None :\n return JsonResponse(self.hateoas_replace_links(request,self.friendly_json(result)), safe=False)\n else:\n return JsonResponse([], safe=False)\n except Exception 
as e:\n                return HttpResponseServerError('Internal server error: Failed query on the database')\n    def post(self, request):\n        payload = json.loads(request.body.decode('utf-8'))\n        if payload is not None and self.accountok(payload) == True:\n            duplicate = self.useraccounts.find_one({'login':payload['login']})\n            if duplicate is None:\n                inserted_id = self.useraccounts.insert(payload)\n                inserted = self.useraccounts.find_one({'_id':inserted_id})\n                # return JsonResponse(self.friendly_json(inserted))\n                return HttpResponseRedirect(request.build_absolute_uri('/useraccounts/{}'.format(inserted['login'])))\n            else:\n                return HttpResponseBadRequest('Bad Request: Account with the same login is already registered')\n        else:\n            return HttpResponseBadRequest('Bad Request: The account object to be added is not in the expected shape.')\nclass AccountDetailView(AccountsIndexView):\n    def __init__(self ,*args, **kwargs):\n        # this is where the view gets initialized\n        AccountsIndexView.__init__(self,*args, **kwargs)\n    def get(self, request, login):\n        \"\"\"This is to get a single account's details from the database\"\"\"\n        result = self.useraccounts.find_one({\"login\":login})\n        if result is not None:\n            return JsonResponse(self.hateoas_replace_links(request,self.friendly_json(result)), safe=False)\n        else:\n            return JsonResponse({}, safe=False)\n    def put(self, request, login):\n        \"\"\"This is to change the direct properties of the account\"\"\"\n        payload = json.loads(request.body.decode('utf-8'))\n        if payload is not None and 'pin' in payload:\n            account_exists = self.useraccounts.find_one({'login':login}) is not None\n            if account_exists == True:\n                new_pin = payload['pin']\n                self.useraccounts.update({'login':login}, {'$set':{'pin':new_pin}})\n                return HttpResponseRedirect(request.build_absolute_uri('/useraccounts/{}'.format(login)))\n            else:\n                # this is the case when you are trying to update an account that does not exist\n                return HttpResponseBadRequest('Bad Request: no account with login {} exists'.format(login))\n        else:\n            return HttpResponseBadRequest('Bad Request: account not in valid shape for update; check the required fields and send again')\nclass AccountDevices(AnyView):\n    def __init__(self, *args, **kwargs):\n        AnyView.__init__(self, *args,**kwargs)\n        self.devices = self.pifarm['devices']\n    def get(self, request,login):\n        \"\"\"This is for getting the details of all the devices for the login account\"\"\"\n        result = list(self.devices.find({'login':login}))\n        return JsonResponse(self.friendly_json(result), safe=False)\n", "sub_path": "http/useraccounts/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4932, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "re.match", "line_number": 34, "usage_type": "call"}, {"api_name": "re.match", "line_number": 36, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 45, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 47, "usage_type": "call"}, {"api_name": "django.http.HttpResponseServerError", "line_number": 49, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 51, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 58, "usage_type": "call"}, {"api_name": "django.http.HttpResponseBadRequest", "line_number": 60, "usage_type": "call"}, {"api_name": "django.http.HttpResponseBadRequest", "line_number": 62, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 
71, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 73, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 76, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 82, "usage_type": "call"}, {"api_name": "django.http.HttpResponseBadRequest", "line_number": 85, "usage_type": "call"}, {"api_name": "django.http.HttpResponseBadRequest", "line_number": 87, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 95, "usage_type": "call"}]} {"seq_id": "596392889", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport re\nimport datetime\nfrom dateutil import rrule\n\nfrom django.http import JsonResponse, Http404\nfrom django.template.defaultfilters import slugify\nfrom django.views.generic import DetailView\n\nfrom cms.views import details\nfrom publisher.views import PublisherDetailView\nfrom .models import Event, EventPackage\nfrom .settings import EVENT_TYPES\n\n\nBATCH_DELTA = 3 # How many batches to show before + after current batch\n\n# Translations from dateinput formatting to Python formatting\nDATEFORMAT_XLATE = [\n (re.compile(pattern), replacement) for (pattern, replacement) in (\n ('dddd', '%A'),\n ('ddd', '%a'),\n ('dd', '%d'),\n ('!%d', '%e'), # Will include a leading space for 1-9\n ('mmmm', '%B'),\n ('mmm', '%b'),\n ('mm', '%m'),\n ('!%m', '%m'), # Will include leading zero\n ('yyyy', '%Y'),\n ('yy', '%y'),\n )\n]\n\n\ndef dateformat_xlate(dateformat):\n for regexp, replacement in DATEFORMAT_XLATE:\n dateformat = regexp.sub(replacement, dateformat)\n return dateformat\n\n\ndef calculate_occurrences(data):\n # TODO: Return error on failure\n occurrences = []\n\n date_format = dateformat_xlate(data['format'])\n start_date = datetime.datetime(int(data['year']),\n int(data['month']),\n int(data['day']))\n rule = rrule.rrulestr(data['rrule'], dtstart=start_date)\n iterator = iter(rule)\n\n if 'batch_size' in data:\n batch_size = int(data['batch_size'])\n else:\n batch_size = 10\n\n if 'start' in data:\n start = int(data['start'])\n else:\n start = 0\n\n cur_batch = start // batch_size\n start = cur_batch * batch_size # Avoid stupid start-values\n\n if hasattr(rule, '_exdate'):\n exdates = sorted(rule._exdate)\n else:\n exdates = []\n\n # Loop through the start first dates, to skip them:\n i = 0\n occurrences = []\n while True:\n try:\n # Get a date\n date = iterator.next()\n except StopIteration:\n # No more dates\n break\n while exdates and date > exdates[0]:\n # There are exdates that appear before this date:\n if i < start:\n # Skip them\n exdates.pop(0)\n i += 1\n else:\n # include them\n exdate = exdates.pop(0)\n occurrences.append({'date': exdate.strftime('%Y%m%dT%H%M%S'),\n 'formattedDate': exdate.strftime(date_format),\n 'type': 'exdate', })\n i += 1\n\n if i >= batch_size + start:\n break # We are done!\n\n i += 1\n if i <= start:\n # We are still iterating up to the first event, so skip this:\n continue\n\n # Add it to the results\n if date in getattr(rule, '_rdate', []):\n occurrence_type = 'rdate'\n elif date == start_date:\n occurrence_type = 'start'\n else:\n occurrence_type = 'rrule'\n occurrences.append({'date': date.strftime('%Y%m%dT%H%M%S'),\n 'formattedDate': date.strftime(date_format),\n 'type': occurrence_type, })\n\n while exdates:\n # There are exdates that are after the end of the recurrence.\n # Excluding the last dates make no sense, as you can change the\n # range instead, but we need to support it anyway.\n exdate = 
exdates.pop(0)\n occurrences.append({'date': exdate.strftime('%Y%m%dT%H%M%S'),\n 'formattedDate': exdate.strftime(date_format),\n 'type': 'exdate', })\n\n # Calculate no of occurrences, but only to a max of three times\n # the batch size. This will support infinite recurrence in a\n # useable way, as there will always be more batches.\n first_batch = max(0, cur_batch - BATCH_DELTA)\n last_batch = max(BATCH_DELTA * 2, cur_batch + BATCH_DELTA)\n maxcount = (batch_size * last_batch) - start\n\n num_occurrences = 0\n while True:\n try:\n iterator.next()\n num_occurrences += 1\n except StopIteration:\n break\n if num_occurrences >= maxcount:\n break\n\n # Total number of occurrences:\n num_occurrences += batch_size + start\n\n max_batch = (num_occurrences - 1) // batch_size\n if last_batch > max_batch:\n last_batch = max_batch\n first_batch = max(0, max_batch - (BATCH_DELTA * 2))\n\n batches = [((x * batch_size) + 1, (x + 1) * batch_size) for x in range(first_batch, last_batch + 1)]\n batch_data = {'start': start,\n 'end': num_occurrences,\n 'batch_size': batch_size,\n 'batches': batches,\n 'currentBatch': cur_batch - first_batch,\n }\n\n return {'occurrences': occurrences, 'batch': batch_data}\n\n\ndef get_occurrences(request):\n \"\"\"\n Adapted from the test_server.py of https://github.com/collective/jquery.recurrenceinput.js\n \"\"\"\n data = {}\n\n for x in ('year', 'month', 'day', 'rrule', 'format', 'start',):\n data[x] = request.GET.get(x, None)\n\n if None in data.values():\n return JsonResponse({'error': True, 'message': \"'year', 'month', 'day', 'rrule', and 'format' are required.\"})\n\n result = calculate_occurrences(data)\n return JsonResponse(result)\n\n\nclass EventDetailView(PublisherDetailView):\n template_name = 'happenings/detail/default.html'\n extra_context = {}\n preview = False\n model = Event\n event_types = EVENT_TYPES\n\n def get_queryset(self):\n qs = super(EventDetailView, self).get_queryset()\n types_filter = [x[0] for x in self.event_types]\n qs = qs.filter(event_type__in=types_filter)\n return qs\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"\n Intercept the request and route to a CMS page if it exists, otherwise\n use the standard detail handling\n \"\"\"\n try:\n return details(request, request.path.lstrip('/').rstrip('/'))\n except Http404:\n pass\n return super(EventDetailView, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(EventDetailView, self).get_context_data(**kwargs)\n context.update(self.extra_context)\n\n if self.object.is_archived():\n self.template_name = 'happenings/404.html'\n elif self.object.event_type == 'current exhibition' and self.object.end_date() < datetime.datetime.today():\n self.template_name = 'happenings/detail/past-exhibition.html'\n elif self.object.parent is not None:\n self.template_name = 'happenings/detail/child.html'\n elif self.object.child_events.all():\n self.template_name = 'happenings/detail/parent.html'\n else:\n self.template_name = 'happenings/detail/%s.html' % slugify(self.object.event_type)\n\n return context\n\n\nclass PackageDetailView(DetailView):\n queryset = EventPackage.objects.all()\n template_name = 'happenings/detail/package.html'\n", "sub_path": "happenings/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7255, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "re.compile", "line_number": 21, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 47, "usage_type": "call"}, {"api_name": "dateutil.rrule.rrulestr", "line_number": 50, "usage_type": "call"}, {"api_name": "dateutil.rrule", "line_number": 50, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 169, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 172, "usage_type": "call"}, {"api_name": "publisher.views.PublisherDetailView", "line_number": 175, "usage_type": "name"}, {"api_name": "models.Event", "line_number": 179, "usage_type": "name"}, {"api_name": "settings.EVENT_TYPES", "line_number": 180, "usage_type": "name"}, {"api_name": "cms.views.details", "line_number": 194, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 195, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 205, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 205, "usage_type": "attribute"}, {"api_name": "django.template.defaultfilters.slugify", "line_number": 212, "usage_type": "call"}, {"api_name": "django.views.generic.DetailView", "line_number": 217, "usage_type": "name"}, {"api_name": "models.EventPackage.objects.all", "line_number": 218, "usage_type": "call"}, {"api_name": "models.EventPackage.objects", "line_number": 218, "usage_type": "attribute"}, {"api_name": "models.EventPackage", "line_number": 218, "usage_type": "name"}]} {"seq_id": "603649510", "text": "import PyQt4.QtGui as qg\nimport PyQt4.QtCore as qc\n\nimport maya.cmds as mc\n\nimport os\nimport xml.etree.ElementTree as ET\n\nimport cg_inventor.sys.lib.qt.base as base\nimport cg_inventor.sys.lib.qt.widgets.scene_check as scene_check\n\nfrom cg_inventor.maya.apps.toolbox import page, settings\n\nimport cg_inventor.maya.utils.generic as util_generic\nimport cg_inventor.maya.utils.transforms as util_transforms\nimport cg_inventor.maya.utils.history as util_history\nimport cg_inventor.maya.utils.mesh as util_mesh\nimport cg_inventor.maya.utils.texture as util_texture\n\n\ncallbackPlus = util_generic.callbackPlus\ncallbackSetup = util_generic.callbackSetup\ncallbackCounter = util_generic.callbackCounter\n\nsetup = scene_check.setup\ncheck = scene_check.check\nselect = scene_check.select\nfix = scene_check.fix\n\nMODE_SEL = scene_check.SceneCheck.MODE_SEL\nMODE_ALL = scene_check.SceneCheck.MODE_ALL\n\n# ------------------------------------------------------------------------------------------------ #\n# MODELLING CHECKS #\n# ------------------------------------------------------------------------------------------------ #\n\nTITLE = 'Scene Checker'\nSUBTITLE = 'Modelling' \n\nclass ModellingChecks(page.Page):\n def __init__(self):\n page.Page.__init__(self)\n self.title = TITLE\n self.subtitle = SUBTITLE\n \n self._all_sets = []\n self._all_spacers = []\n \n self.settings_widget = ModellingChecksSettings\n \n self.loadXML(\"scene_checks_modelling.xml\")\n \n \n def _addSceneCheckSet(self, title):\n new_set = scene_check.SceneCheckSet(title)\n self._all_sets.append(new_set)\n self.addElement(new_set)\n \n self.connect(new_set, qc.SIGNAL(base.Base.STATUS_NORMAL), self.statusNormal)\n self.connect(new_set, qc.SIGNAL(base.Base.STATUS_SUCCESS), self.statusSuccess)\n self.connect(new_set, qc.SIGNAL(base.Base.STATUS_ERROR), self.statusError)\n \n return new_set\n \n\n def clear(self):\n for check_set in self._all_sets:\n self.removeElement(check_set)\n check_set.deleteLater()\n \n for spacer in self._all_spacers:\n self.removeElement(spacer)\n \n self._all_sets = []\n self._all_spacers 
= []\n \n \n def loadXML(self, xml_file):\n # get toolbox xml path\n #\n import cg_inventor.maya.apps.toolbox as toolbox\n xml_path = os.path.join(os.path.dirname(toolbox.__file__), 'xml', xml_file)\n \n # get the root element\n #\n tree = ET.parse(xml_path)\n root = tree.getroot()\n \n for check_set in root:\n set_title = check_set.attrib['title']\n new_set = self._addSceneCheckSet(set_title)\n \n for check in check_set:\n function_set = str(check.text).strip()\n new_check = scene_check.SceneCheck()\n new_check.loadFromLibrary(function_set)\n \n new_set.addSceneCheck(new_check)\n \n spacer = qg.QSpacerItem(20,20,qg.QSizePolicy.Fixed)\n self.addElement(spacer)\n self._all_spacers.append(spacer)\n \n \n# ------------------------------------------------------------------------------------------------ #\n# MODELLING CHECKS #\n# ------------------------------------------------------------------------------------------------ #\n\nclass ModellingChecksSettings(settings.Settings):\n def __init__(self):\n settings.Settings.__init__(self)\n #self.setMinimumWidth(600)\n self.setFixedWidth(700)\n \n main_layout = qg.QHBoxLayout()\n main_layout.setContentsMargins(5,0,5,5)\n main_layout.setSpacing(5)\n self.addElement(main_layout)\n \n group_widget = qg.QWidget()\n group_widget.setLayout(qg.QVBoxLayout())\n group_widget.layout().setContentsMargins(0,0,0,0)\n group_widget.layout().setSpacing(0)\n check_widget = qg.QWidget()\n check_widget.setLayout(qg.QVBoxLayout())\n check_widget.layout().setContentsMargins(0,0,0,0)\n check_widget.layout().setSpacing(0)\n button_widget = qg.QWidget()\n button_widget.setLayout(qg.QVBoxLayout())\n all_checks_widget = qg.QWidget()\n all_checks_widget.setLayout(qg.QVBoxLayout())\n all_checks_widget.layout().setContentsMargins(0,0,0,0)\n all_checks_widget.layout().setSpacing(0)\n description_widget = qg.QWidget()\n description_widget.setLayout(qg.QVBoxLayout())\n description_widget.layout().setContentsMargins(0,0,0,0)\n description_widget.layout().setSpacing(0)\n \n button_widget.setFixedWidth(30)\n \n main_layout.addWidget(group_widget)\n main_layout.addWidget(check_widget)\n main_layout.addWidget(button_widget)\n main_layout.addWidget(all_checks_widget)\n main_layout.addWidget(description_widget)\n \n title_font = qg.QFont()\n title_font.setBold(True)\n \n group_label = qg.QLabel('GROUPS')\n group_layout = qg.QHBoxLayout()\n checks_label = qg.QLabel('CHECKS')\n checks_layout = qg.QHBoxLayout()\n all_checks_label = qg.QLabel('ALL CHECKS')\n all_checks_layout = qg.QHBoxLayout()\n description_label = qg.QLabel('DESCRIPTION')\n description_layout = qg.QHBoxLayout()\n \n group_layout.addSpacerItem(qg.QSpacerItem(5, 5, qg.QSizePolicy.Expanding))\n group_layout.addWidget(group_label)\n group_layout.addSpacerItem(qg.QSpacerItem(5, 5, qg.QSizePolicy.Expanding))\n \n checks_layout.addSpacerItem(qg.QSpacerItem(5, 5, qg.QSizePolicy.Expanding))\n checks_layout.addWidget(checks_label)\n checks_layout.addSpacerItem(qg.QSpacerItem(5, 5, qg.QSizePolicy.Expanding))\n \n all_checks_layout.addSpacerItem(qg.QSpacerItem(5, 5, qg.QSizePolicy.Expanding))\n all_checks_layout.addWidget(all_checks_label)\n all_checks_layout.addSpacerItem(qg.QSpacerItem(5, 5, qg.QSizePolicy.Expanding))\n \n description_layout.addSpacerItem(qg.QSpacerItem(5, 5, qg.QSizePolicy.Expanding))\n description_layout.addWidget(description_label)\n description_layout.addSpacerItem(qg.QSpacerItem(5, 5, qg.QSizePolicy.Expanding))\n \n group_label.setFont(title_font)\n checks_label.setFont(title_font)\n 
all_checks_label.setFont(title_font)\n description_label.setFont(title_font)\n \n group_label.setFixedHeight(18)\n checks_label.setFixedHeight(18)\n all_checks_label.setFixedHeight(18)\n description_label.setFixedHeight(18)\n \n self.group_list = qg.QListWidget()\n self.check_list = qg.QListWidget()\n self.all_checks_list = qg.QListWidget()\n self.description_te = qg.QTextEdit()\n \n group_widget.layout().addLayout(group_layout)\n group_widget.layout().addWidget(self.group_list)\n check_widget.layout().addLayout(checks_layout)\n check_widget.layout().addWidget(self.check_list)\n all_checks_widget.layout().addLayout(all_checks_layout)\n all_checks_widget.layout().addWidget(self.all_checks_list)\n description_widget.layout().addLayout(description_layout)\n description_widget.layout().addWidget(self.description_te)\n \n self.all_checks_list.itemClicked.connect(self.showDescription)\n \n self._check_descriptions = {}\n \n self.populateAllChecks()\n \n \n def clearAllChecks(self):\n pass\n \n \n def populateAllChecks(self):\n self.clearAllChecks()\n \n all_names = []\n for check_name, setup_func in scene_check.SceneCheckLibrary.setup.items():\n title, _, description = setup_func()\n self._check_descriptions[title] = description\n all_names.append(title)\n \n for name in sorted(all_names):\n new_item = qg.QListWidgetItem()\n new_item.setText(name)\n self.all_checks_list.addItem(new_item)\n \n \n \n def showDescription(self, check_item): \n description = self._check_descriptions[str(check_item.text())]\n \n self.description_te.setText(description)\n \n \n \n# ------------------------------------------------------------------------------------------------ #\n# CHECKS #\n# ------------------------------------------------------------------------------------------------ #\n\n@setup\ndef freezeTransforms():\n title = 'Unfrozen Transforms'\n message = 'Found $NUM Unfrozen Transform(s)'\n description = 'Finds all transforms with unfrozen values.' 
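## the $NUM token in these check messages is presumably substituted with the number of flagged nodes by the scene_check framework (editor's note)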
\n    return title, message, description\n\n\n@check\ndef freezeTransforms(selection_mode, callback=None):\n    try:\n        if selection_mode == MODE_SEL:\n            transforms = mc.ls(sl=True)\n        else:\n            transforms = util_transforms.getAllTransforms()\n\n        if not len(transforms): return\n\n        return util_transforms.getUnfrozenTransforms(transforms, callback=callback)\n\n    except util_transforms.TransformError as e:\n        raise scene_check.SceneCheckError(str(e)) ## str(e) for consistency with the other checks\n\n\n@select\ndef freezeTransforms(unfrozen_transforms):\n    mc.select(mc.ls(unfrozen_transforms, type='transform'), r=True)\n\n\n@fix\ndef freezeTransforms(transforms, callback=None):\n    try:\n        util_transforms.freezeTransforms(transforms, filter=True, callback=callback)\n    except util_transforms.TransformError as e:\n        raise scene_check.SceneCheckError(str(e))\n\n#--------------------------------------------------------------------------------------------------#\n\n@setup\ndef nonDeletedHistory():\n    title = 'Non-Deleted History'\n    message = 'Found $NUM Node(s) with History'\n    description = 'Find any nodes with history.'\n    return title, message, description\n\n\n@check\ndef nonDeletedHistory(selection_mode, callback=None):\n    try:\n        if selection_mode == MODE_SEL:\n            transforms = mc.ls(sl=True)\n        else:\n            transforms = util_transforms.getAllTransforms()\n\n        if not len(transforms): return\n\n        return util_history.getNodesWithHistory(transforms, callback=callback)\n    except util_history.HistoryError as e:\n        raise scene_check.SceneCheckError(str(e))\n\n\n@select\ndef nonDeletedHistory(history_nodes):\n    mc.select(mc.ls(history_nodes), r=True)\n\n\n@fix\ndef nonDeletedHistory(history_nodes):\n    try:\n        util_history.deleteHistory(history_nodes)\n    except util_history.HistoryError as e:\n        raise scene_check.SceneCheckError(str(e))\n\n#--------------------------------------------------------------------------------------------------#\n\n@setup\ndef centeredPivots():\n    title = 'Non Centered Pivots'\n    message = 'Found $NUM Non Centered Pivot(s)'\n    description = 'Find any geometry with uncentered pivots.'\n    return title, message, description\n\n\n@check\ndef centeredPivots(selection_mode, callback=None):\n    try:\n        if selection_mode == MODE_SEL:\n            meshes = util_mesh.getMeshesFromSelection(mc.ls(sl=True))\n        else:\n            meshes = util_mesh.getAllMeshes()\n\n        if not len(meshes): return\n\n        if callback: counter, increment = callbackSetup(len(meshes), callback_range=30)\n\n        transforms = set([])\n        for mesh in meshes:\n            if callback: callback(callbackCounter(counter, increment))\n\n            if util_mesh._isMeshEmpty(mesh): continue\n            transforms.add(util_mesh.getTransformFromShape(mesh))\n\n        if callback: counter, increment = callbackSetup(len(transforms), callback_range=70)\n\n        not_centred = []\n        for transform in transforms:\n            if callback: callback(30 + callbackCounter(counter, increment))\n\n            if not util_transforms.isPivotCentered(transform):\n                not_centred.append(transform)\n\n        return not_centred\n\n    except (util_transforms.TransformError, util_mesh.MeshError) as e:\n        raise scene_check.SceneCheckError(str(e))\n\n\n@select\ndef centeredPivots(transforms):\n    mc.select(transforms, r=True)\n\n\n@fix\ndef centeredPivots(transforms, callback=None):\n    try:\n        mc.xform(transforms, cp=True)\n    except Exception as e:\n        raise scene_check.SceneCheckError(str(e))\n\n#--------------------------------------------------------------------------------------------------#\n\n
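## Editor's note (hypothetical sketch, not registered anywhere): every check in\n## this module follows the same four-part contract -- @setup returns\n## (title, message, description), @check returns the offending nodes, @select\n## highlights them and @fix repairs them. A minimal new check could look like:\n##\n##   @setup\n##   def emptyGroups():\n##       return 'Empty Groups', 'Found $NUM Empty Group(s)', 'Find any empty transform groups.'\n##\n##   @check\n##   def emptyGroups(selection_mode, callback=None):\n##       groups = mc.ls(sl=True, type='transform') if selection_mode == MODE_SEL else mc.ls(type='transform')\n##       return [group for group in groups if not mc.listRelatives(group)]\n\n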
@setup\ndef unknownNodes():\n    title = \"'Unknown' Nodes\"\n    message = \"Found $NUM 'Unknown' Node(s)\"\n    description = 'Find any nodes of unknown type.'\n    return title, message, description\n\n\n@check\ndef unknownNodes(selection_mode):\n    if selection_mode == MODE_SEL:\n        return mc.ls(sl=True, type='unknown')\n    else:\n        return mc.ls(type='unknown')\n\n\n@select\ndef unknownNodes(nodes):\n    mc.select(mc.ls(nodes), r=True)\n\n#--------------------------------------------------------------------------------------------------#\n\n@setup\ndef triangles():\n    title = 'Triangles'\n    message = 'Found $NUM Triangle(s)'\n    description = 'Find any triangular faces.'\n    return title, message, description\n\n\n@check\ndef triangles(selection_mode, callback=None):\n    try:\n        if selection_mode == MODE_SEL:\n            meshes = util_mesh.getMeshesFromSelection(mc.ls(sl=True))\n        else:\n            meshes = util_mesh.getAllMeshes()\n\n        if not len(meshes): return\n\n        return util_mesh.checkMesh(meshes, flags=util_mesh.TRIANGLES, callback=callback)\n\n    except util_mesh.MeshError as e:\n        raise scene_check.SceneCheckError(str(e))\n\n\n@select\ndef triangles(triangles):\n    mc.select(triangles, r=True)\n\n#--------------------------------------------------------------------------------------------------#\n\n@setup\ndef nsided():\n    title = 'N-Sided Faces'\n    message = 'Found $NUM N-Sided Face(s)'\n    description = 'Find any faces with more than four sides.'\n    return title, message, description\n\n\n@check\ndef nsided(selection_mode, callback=None):\n    try:\n        if selection_mode == MODE_SEL:\n            meshes = util_mesh.getMeshesFromSelection(mc.ls(sl=True))\n        else:\n            meshes = util_mesh.getAllMeshes()\n\n        if not len(meshes): return\n\n        return util_mesh.checkMesh(meshes, flags=util_mesh.NSIDED, callback=callback)\n\n    except util_mesh.MeshError as e:\n        raise scene_check.SceneCheckError(str(e))\n\n\n@select\ndef nsided(faces):\n    mc.select(faces, r=True)\n\n#--------------------------------------------------------------------------------------------------#\n\n@setup\ndef holed():\n    title = 'Holes'\n    message = 'Found $NUM Hole(s)'\n    description = 'Find any meshes containing holes.'\n    return title, message, description\n\n\n@check\ndef holed(selection_mode, callback=None):\n    try:\n        if selection_mode == MODE_SEL:\n            meshes = util_mesh.getMeshesFromSelection(mc.ls(sl=True))\n        else:\n            meshes = util_mesh.getAllMeshes()\n\n        if not len(meshes): return\n\n        if callback: counter, increment = callbackSetup(len(meshes), callback_range=30)\n\n        holes = []\n        for mesh in meshes:\n            if callback: callback(callbackCounter(counter, increment))\n\n            if util_mesh._isMeshEmpty(mesh): continue\n            mesh_holes = util_mesh.getMeshHoles(mesh)\n            holes.extend([(mesh, mesh_hole) for mesh_hole in mesh_holes])\n\n        return holes\n\n    except util_mesh.MeshError as e:\n        raise scene_check.SceneCheckError(str(e))\n\n\n@select\ndef holed(holes):\n    edges = []\n    for mesh, edge_indices in holes:\n        edges.extend(['%s.e[%s]' %(mesh, edge_index) for edge_index in edge_indices])\n    mc.select(mc.ls(edges), r=True)\n\n#--------------------------------------------------------------------------------------------------#\n\n
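## Editor's sketch (hypothetical, for illustration only): these checks can also\n## be driven manually from the script editor instead of the SceneCheck UI,\n## e.g. flagging n-sided faces on every mesh:\n##\n##   bad_faces = util_mesh.checkMesh(util_mesh.getAllMeshes(), flags=util_mesh.NSIDED)\n##   if bad_faces:\n##       mc.select(bad_faces, r=True)\n\n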
flags=util_mesh.ZERO_LENGTH_EDGES, callback=callback)\n\n    except util_mesh.MeshError as e:\n        raise scene_check.SceneCheckError(str(e))\n\n@select\ndef zeroLengthEdges(edges):\n    mc.select(mc.ls(edges), r=True)\n\n#--------------------------------------------------------------------------------------------------#\n\n@setup\ndef zeroAreaFaces():\n    title = 'Zero Area Faces'\n    message = 'Found $NUM Zero Area Face(s)'\n    description = 'Find any geometry with zero area faces.'\n    return title, message, description\n\n\n@check\ndef zeroAreaFaces(selection_mode, callback=None):\n    try:\n        if selection_mode == MODE_SEL:\n            meshes = util_mesh.getMeshesFromSelection(mc.ls(sl=True))\n        else:\n            meshes = util_mesh.getAllMeshes()\n\n        if not len(meshes): return\n\n        return util_mesh.checkMesh(meshes, flags=util_mesh.ZERO_AREA_FACES, callback=callback)\n\n    except util_mesh.MeshError as e:\n        raise scene_check.SceneCheckError(str(e))\n\n\n@select\ndef zeroAreaFaces(faces):\n    mc.select(mc.ls(faces), r=True)\n\n#--------------------------------------------------------------------------------------------------#\n\n@setup\ndef lockedNormals():\n    title = 'Locked Normals'\n    message = 'Found $NUM Locked Normal(s)'\n    description = 'Find any geometry with locked normals.'\n    return title, message, description\n\n\n@check\ndef lockedNormals(selection_mode, callback=None):\n    try:\n        if selection_mode == MODE_SEL:\n            meshes = util_mesh.getMeshesFromSelection(mc.ls(sl=True))\n        else:\n            meshes = util_mesh.getAllMeshes()\n\n        num_meshes = len(meshes)\n        if not num_meshes: return\n\n        if callback: counter, increment = callbackSetup(num_meshes, callback_range=100)\n\n        locked_normals = []\n        for mesh in meshes:\n            if callback: callback(callbackCounter(counter, increment))\n\n            if util_mesh._isMeshEmpty(mesh): continue\n            indices = util_mesh.getLockedVertexNormals(mesh)\n            vertices = ['%s.vtx[%s]' %(mesh, index) for index in indices]\n            locked_normals.extend(vertices)\n\n        return locked_normals\n\n    except util_mesh.MeshError as e:\n        raise scene_check.SceneCheckError(str(e))\n\n\n@select\ndef lockedNormals(normals):\n    mc.select(mc.ls(normals), r=True)\n\n\n@fix\ndef lockedNormals(normals):\n    try:\n        mc.polyNormalPerVertex(normals, ufn=True)\n    except Exception as e:\n        raise scene_check.SceneCheckError(str(e))\n\n#--------------------------------------------------------------------------------------------------#\n\n@setup\ndef hardEdges():\n    title = 'Hard Edges'\n    message = 'Found $NUM Hard Edge(s)'\n    description = 'Find any geometry with hard edges.'\n    return title, message, description\n\n\n@check\ndef hardEdges(selection_mode, callback=None):\n    try:\n        if selection_mode == MODE_SEL:\n            meshes = util_mesh.getMeshesFromSelection(mc.ls(sl=True))\n        else:\n            meshes = util_mesh.getAllMeshes()\n\n        num_meshes = len(meshes)\n        if not num_meshes: return\n\n        if callback: counter, increment = callbackSetup(num_meshes, callback_range=100)\n\n        hard_edges = []\n        for mesh in meshes:\n            if callback: callback(callbackCounter(counter, increment))\n\n            if util_mesh._isMeshEmpty(mesh): continue\n            hard_edges.extend([(mesh, edge) for edge in util_mesh.getHardEdges(mesh)])\n\n        return hard_edges\n\n    except util_mesh.MeshError as e:\n        raise scene_check.SceneCheckError(str(e))\n\n\n@select\ndef hardEdges(edges):\n    edge_strings = []\n    for mesh, edge in edges:\n        edge_strings.append('%s.e[%s]' %(mesh, edge))\n    mc.select(edge_strings, r=True)\n\n\n@fix\ndef hardEdges(edges, callback=None):\n    try:\n        all_hard_edges = {}\n\n        if callback:\n            counter, 
increment = callbackSetup(len(edges), callback_range=20)\n\n        for mesh, edge in edges:\n            if callback: callback(callbackCounter(counter, increment))\n\n            try:\n                hard_edges = all_hard_edges[mesh]\n            except KeyError:\n                hard_edges = all_hard_edges[mesh] = []\n            hard_edges.append('%s.e[%s]' %(mesh, edge))\n\n        if callback:\n            counter, increment = callbackSetup(len(all_hard_edges.keys()), callback_range=80)\n\n        for mesh, hard_edges in all_hard_edges.items():\n            if callback: callback(20 + callbackCounter(counter, increment))\n\n            history = bool(mc.listHistory(mesh, il=2, pdo=True))\n            mc.polySoftEdge(*hard_edges, a=180, ch=history)\n\n    except Exception as e:\n        raise scene_check.SceneCheckError(str(e))\n\n#--------------------------------------------------------------------------------------------------#\n\n@setup\ndef unconnectedIntermediates():\n    title = 'Unconnected Intermediates'\n    message = 'Found $NUM Unconnected Intermediate(s)'\n    description = 'Find any unconnected intermediate objects.'\n    return title, message, description\n\n\n@check\ndef unconnectedIntermediates(selection_mode):\n    return util_mesh.getUnconnectedIntermediates()\n\n\n@select\ndef unconnectedIntermediates(intermediates):\n    mc.select(mc.ls(intermediates), r=True)\n\n\n@fix\ndef unconnectedIntermediates(intermediates):\n    intermediates = mc.ls(intermediates)\n    if not intermediates: return\n\n    mc.delete(intermediates)\n\n#--------------------------------------------------------------------------------------------------#\n\n@setup\ndef emptyMeshes():\n    title = 'Empty Meshes'\n    message = 'Found $NUM Empty Mesh(es)'\n    description = 'Find any empty meshes.'\n    return title, message, description\n\n\n@check\ndef emptyMeshes(selection_mode, callback=None):\n    try:\n        if selection_mode == MODE_SEL:\n            meshes = util_mesh.getMeshesFromSelection(mc.ls(sl=True))\n        else:\n            meshes = util_mesh.getEmptyMeshes()\n\n        num_meshes = len(meshes)\n        if not num_meshes: return\n\n        if callback: counter, increment = callbackSetup(num_meshes, callback_range=100)\n\n        empty_meshes = []\n        for mesh in meshes:\n            if callback: callback(callbackCounter(counter, increment))\n\n            if util_mesh._isMeshEmpty(mesh):\n                empty_meshes.append(mesh)\n\n        return empty_meshes\n\n    except util_mesh.MeshError as e:\n        raise scene_check.SceneCheckError(str(e))\n\n\n@select\ndef emptyMeshes(meshes):\n    mc.select(mc.ls(meshes), r=True)\n\n\n@fix\ndef emptyMeshes(meshes):\n    meshes = mc.ls(meshes)\n    if not meshes: return\n\n    try:\n        mc.delete(meshes)\n    except Exception as e:\n        raise scene_check.SceneCheckError(str(e))\n\n#--------------------------------------------------------------------------------------------------#\n\n@setup\ndef unusedShaders():\n    title = 'Unused Shaders'\n    message = 'Found $NUM Unused Shader(s)'\n    description = 'Find any unused shaders.'\n    return title, message, description\n\n\n@check\ndef unusedShaders(selection_mode):\n    try:\n        if selection_mode == MODE_SEL:\n            materials = mc.ls(sl=True, mat=True)\n\n            unused_materials = []\n            for material in materials:\n                if util_texture._getMaterialMembers(material) is None:\n                    unused_materials.append(material)\n\n        else:\n            unused_materials = util_texture.getAllUnusedMaterials()\n\n        return unused_materials\n\n    except util_texture.TextureError as e:\n        raise scene_check.SceneCheckError(str(e))\n\n\n@select\ndef unusedShaders(materials):\n    mc.select(materials, r=True)\n\n\n@fix\ndef unusedShaders(materials, callback=None):\n    try:\n        if callback: counter, increment = 
callbackSetup(len(materials), callback_range=100)\n\n        for material in materials:\n            if callback: callback(callbackCounter(counter, increment))\n\n            util_texture.deleteMaterial(material)\n\n    except util_texture.TextureError as e:\n        raise scene_check.SceneCheckError(str(e))\n\n#--------------------------------------------------------------------------------------------------#\n\n@setup\ndef uvsOutOfBounds():\n    title = 'UVs Out of Bounds'\n    message = 'Found $NUM UV(s) Out of Bounds'\n    description = 'Find any UVs outside of the 0 to 1 texture space.'\n    return title, message, description\n\n\n@check\ndef uvsOutOfBounds(selection_mode, callback=None):\n    try:\n        if selection_mode == MODE_SEL:\n            meshes = util_mesh.getMeshesFromSelection(mc.ls(sl=True))\n        else:\n            meshes = util_mesh.getAllMeshes()\n\n        num_meshes = len(meshes)\n        if not num_meshes: return\n\n        if callback: counter, increment = callbackSetup(num_meshes, callback_range=100)\n\n        all_uvs_oob = []\n        for mesh in meshes:\n            if callback: callback(callbackCounter(counter, increment))\n\n            uvs_oob = util_texture.getAllUVsOutOfBounds(mesh)\n            for uv_index in uvs_oob:\n                all_uvs_oob.append('%s.map[%s]' %(mesh, uv_index))\n\n        return all_uvs_oob\n\n    except util_texture.TextureError as e:\n        raise scene_check.SceneCheckError(str(e))\n\n\n@select\ndef uvsOutOfBounds(uvs):\n    mc.select(uvs, r=True)\n\n#--------------------------------------------------------------------------------------------------#\n\n@setup\ndef fivePointers():\n    title = 'Five+ Pointers'\n    message = 'Found $NUM Five+ Pointer(s)'\n    description = 'Find any vertices with five or more connected edges.'\n    return title, message, description\n\n\n@check\ndef fivePointers(selection_mode, callback=None):\n    try:\n        if selection_mode == MODE_SEL:\n            meshes = util_mesh.getMeshesFromSelection(mc.ls(sl=True))\n        else:\n            meshes = util_mesh.getAllMeshes()\n\n        five_pointers = []\n        for mesh in meshes:\n            vertices = util_mesh.getFiveEdgeVerts(mesh)\n            for vertex_index in vertices:\n                five_pointers.append('%s.vtx[%s]' %(mesh, vertex_index))\n\n        return five_pointers\n\n    except Exception as e:\n        raise scene_check.SceneCheckError(str(e))\n\n\n@select\ndef fivePointers(verts):\n    mc.select(verts, r=True)", "sub_path": "maya/apps/toolbox/pages/scene_checks/modelling.py", "file_name": "modelling.py", "file_ext": "py", "file_size_in_byte": 27801, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "cg_inventor.maya.utils.generic.callbackPlus", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.generic", "line_number": 21, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.generic.callbackSetup", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.generic", "line_number": 22, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.generic.callbackCounter", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.generic", "line_number": 23, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.setup", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 25, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.check", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 26, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.select", 
"line_number": 27, "usage_type": "attribute"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 27, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.fix", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 28, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheck", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 30, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheck", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 31, "usage_type": "name"}, {"api_name": "cg_inventor.maya.apps.toolbox.page.Page", "line_number": 40, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.apps.toolbox.page", "line_number": 40, "usage_type": "name"}, {"api_name": "cg_inventor.maya.apps.toolbox.page.Page.__init__", "line_number": 42, "usage_type": "call"}, {"api_name": "cg_inventor.maya.apps.toolbox.page.Page", "line_number": 42, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.apps.toolbox.page", "line_number": 42, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckSet", "line_number": 55, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 55, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 59, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 59, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.base.Base", "line_number": 59, "usage_type": "attribute"}, {"api_name": "cg_inventor.sys.lib.qt.base", "line_number": 59, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 60, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 60, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.base.Base", "line_number": 60, "usage_type": "attribute"}, {"api_name": "cg_inventor.sys.lib.qt.base", "line_number": 60, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 61, "usage_type": "call"}, {"api_name": "PyQt4.QtCore", "line_number": 61, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.base.Base", "line_number": 61, "usage_type": "attribute"}, {"api_name": "cg_inventor.sys.lib.qt.base", "line_number": 61, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 82, "usage_type": "call"}, {"api_name": "cg_inventor.maya.apps.toolbox.__file__", "line_number": 82, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.apps.toolbox", "line_number": 82, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 86, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 86, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheck", "line_number": 95, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 95, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSpacerItem", "line_number": 100, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 100, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSizePolicy", "line_number": 100, "usage_type": "attribute"}, {"api_name": 
"cg_inventor.maya.apps.toolbox.settings.Settings", "line_number": 109, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.apps.toolbox.settings", "line_number": 109, "usage_type": "name"}, {"api_name": "cg_inventor.maya.apps.toolbox.settings.Settings.__init__", "line_number": 111, "usage_type": "call"}, {"api_name": "cg_inventor.maya.apps.toolbox.settings.Settings", "line_number": 111, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.apps.toolbox.settings", "line_number": 111, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QHBoxLayout", "line_number": 115, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 115, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 120, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 120, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QVBoxLayout", "line_number": 121, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 121, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 124, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 124, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QVBoxLayout", "line_number": 125, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 125, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 128, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 128, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QVBoxLayout", "line_number": 129, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 129, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 130, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 130, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QVBoxLayout", "line_number": 131, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 131, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 134, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 134, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QVBoxLayout", "line_number": 135, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 135, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFont", "line_number": 147, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 147, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 150, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 150, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QHBoxLayout", "line_number": 151, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 151, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 152, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 152, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QHBoxLayout", "line_number": 153, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 153, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 154, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 154, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QHBoxLayout", "line_number": 155, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 155, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 156, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 156, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QHBoxLayout", "line_number": 157, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 157, "usage_type": 
"name"}, {"api_name": "PyQt4.QtGui.QSpacerItem", "line_number": 159, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 159, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSizePolicy", "line_number": 159, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QSpacerItem", "line_number": 161, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 161, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSizePolicy", "line_number": 161, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QSpacerItem", "line_number": 163, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 163, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSizePolicy", "line_number": 163, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QSpacerItem", "line_number": 165, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 165, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSizePolicy", "line_number": 165, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QSpacerItem", "line_number": 167, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 167, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSizePolicy", "line_number": 167, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QSpacerItem", "line_number": 169, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 169, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSizePolicy", "line_number": 169, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QSpacerItem", "line_number": 171, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 171, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSizePolicy", "line_number": 171, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QSpacerItem", "line_number": 173, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 173, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QSizePolicy", "line_number": 173, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui.QListWidget", "line_number": 185, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 185, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QListWidget", "line_number": 186, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 186, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QListWidget", "line_number": 187, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 187, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QTextEdit", "line_number": 188, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 188, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckLibrary.setup.items", "line_number": 214, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckLibrary", "line_number": 214, "usage_type": "attribute"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 214, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QListWidgetItem", "line_number": 220, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 220, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 249, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 249, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.transforms.getAllTransforms", "line_number": 251, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.transforms", "line_number": 251, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.transforms.getUnfrozenTransforms", "line_number": 255, "usage_type": "call"}, {"api_name": 
"cg_inventor.maya.utils.transforms", "line_number": 255, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.transforms.TransformError", "line_number": 257, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.transforms", "line_number": 257, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 258, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 258, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 263, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 263, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 263, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.transforms.freezeTransforms", "line_number": 269, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.transforms", "line_number": 269, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.transforms.TransformError", "line_number": 270, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.transforms", "line_number": 270, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 271, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 271, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 287, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 287, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.transforms.getAllTransforms", "line_number": 289, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.transforms", "line_number": 289, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.history.getNodesWithHistory", "line_number": 293, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.history", "line_number": 293, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.history.HistoryError", "line_number": 294, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.history", "line_number": 294, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 295, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 295, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 300, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 300, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 300, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.history.deleteHistory", "line_number": 306, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.history", "line_number": 306, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.history.HistoryError", "line_number": 307, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.history", "line_number": 307, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 308, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 308, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getMeshesFromSelection", "line_number": 324, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 324, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 324, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 324, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getAllMeshes", "line_number": 
326, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 326, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh._isMeshEmpty", "line_number": 336, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 336, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getTransformFromShape", "line_number": 337, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 337, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.transforms.isPivotCentered", "line_number": 345, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.transforms", "line_number": 345, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.transforms.TransformError", "line_number": 350, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.transforms", "line_number": 350, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.MeshError", "line_number": 350, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 350, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 351, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 351, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 356, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 356, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 362, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 362, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 364, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 364, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 379, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 379, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 381, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 381, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 386, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 386, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 386, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh.getMeshesFromSelection", "line_number": 402, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 402, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 402, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 402, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getAllMeshes", "line_number": 404, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 404, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.checkMesh", "line_number": 408, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 408, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.TRIANGLES", "line_number": 408, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.mesh.MeshError", "line_number": 410, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 410, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 411, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 411, "usage_type": "name"}, 
{"api_name": "maya.cmds.select", "line_number": 416, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 416, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getMeshesFromSelection", "line_number": 432, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 432, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 432, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 432, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getAllMeshes", "line_number": 434, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 434, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.checkMesh", "line_number": 438, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 438, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.NSIDED", "line_number": 438, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.mesh.MeshError", "line_number": 440, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 440, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 441, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 441, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 446, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 446, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getMeshesFromSelection", "line_number": 462, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 462, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 462, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 462, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getAllMeshes", "line_number": 464, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 464, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh._isMeshEmpty", "line_number": 474, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 474, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getMeshHoles", "line_number": 475, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 475, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.MeshError", "line_number": 480, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 480, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 481, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 481, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 489, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 489, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 489, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh.getMeshesFromSelection", "line_number": 505, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 505, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 505, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 505, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getAllMeshes", "line_number": 507, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 507, "usage_type": 
"name"}, {"api_name": "cg_inventor.maya.utils.mesh.checkMesh", "line_number": 511, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 511, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.ZERO_LENGTH_EDGES", "line_number": 511, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.mesh.MeshError", "line_number": 513, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 513, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 514, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 514, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 518, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 518, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 518, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh.getMeshesFromSelection", "line_number": 534, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 534, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 534, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 534, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getAllMeshes", "line_number": 536, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 536, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.checkMesh", "line_number": 540, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 540, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.ZERO_AREA_FACES", "line_number": 540, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.mesh.MeshError", "line_number": 542, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 542, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 543, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 543, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 548, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 548, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 548, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh.getMeshesFromSelection", "line_number": 564, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 564, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 564, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 564, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getAllMeshes", "line_number": 566, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 566, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh._isMeshEmpty", "line_number": 577, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 577, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getLockedVertexNormals", "line_number": 578, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 578, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.MeshError", "line_number": 584, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 584, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", 
"line_number": 585, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 585, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 590, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 590, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 590, "usage_type": "call"}, {"api_name": "maya.cmds.polyNormalPerVertex", "line_number": 596, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 596, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 598, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 598, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getMeshesFromSelection", "line_number": 614, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 614, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 614, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 614, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getAllMeshes", "line_number": 616, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 616, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh._isMeshEmpty", "line_number": 627, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 627, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getHardEdges", "line_number": 628, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 628, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.MeshError", "line_number": 632, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 632, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 633, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 633, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 641, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 641, "usage_type": "name"}, {"api_name": "maya.cmds.listHistory", "line_number": 667, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 667, "usage_type": "name"}, {"api_name": "maya.cmds.polySoftEdge", "line_number": 668, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 668, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 671, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 671, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getUnconnectedIntermediates", "line_number": 685, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 685, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 690, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 690, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 690, "usage_type": "call"}, {"api_name": "maya.cmds.ls", "line_number": 695, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 695, "usage_type": "name"}, {"api_name": "maya.cmds.delete", "line_number": 698, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 698, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getMeshesFromSelection", "line_number": 714, "usage_type": "call"}, {"api_name": 
"cg_inventor.maya.utils.mesh", "line_number": 714, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 714, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 714, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getEmptyMeshes", "line_number": 716, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 716, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh._isMeshEmpty", "line_number": 727, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 727, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.MeshError", "line_number": 732, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 732, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 733, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 733, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 738, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 738, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 738, "usage_type": "call"}, {"api_name": "maya.cmds.ls", "line_number": 743, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 743, "usage_type": "name"}, {"api_name": "maya.cmds.delete", "line_number": 747, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 747, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 749, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 749, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 765, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 765, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.texture._getMaterialMembers", "line_number": 769, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.texture", "line_number": 769, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.texture.getAllUnusedMaterials", "line_number": 773, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.texture", "line_number": 773, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.texture.TextureError", "line_number": 777, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.texture", "line_number": 777, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 778, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 778, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 783, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 783, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.texture.deleteMaterial", "line_number": 794, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.texture", "line_number": 794, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.texture.TextureError", "line_number": 796, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.texture", "line_number": 796, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 797, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 797, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getMeshesFromSelection", "line_number": 813, "usage_type": "call"}, 
{"api_name": "cg_inventor.maya.utils.mesh", "line_number": 813, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 813, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 813, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getAllMeshes", "line_number": 815, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 815, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.texture.getAllUVsOutOfBounds", "line_number": 826, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.texture", "line_number": 826, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.texture.TextureError", "line_number": 832, "usage_type": "attribute"}, {"api_name": "cg_inventor.maya.utils.texture", "line_number": 832, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 833, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 833, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 838, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 838, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getMeshesFromSelection", "line_number": 854, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 854, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 854, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 854, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getAllMeshes", "line_number": 856, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 856, "usage_type": "name"}, {"api_name": "cg_inventor.maya.utils.mesh.getFiveEdgeVerts", "line_number": 860, "usage_type": "call"}, {"api_name": "cg_inventor.maya.utils.mesh", "line_number": 860, "usage_type": "name"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check.SceneCheckError", "line_number": 867, "usage_type": "call"}, {"api_name": "cg_inventor.sys.lib.qt.widgets.scene_check", "line_number": 867, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 872, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 872, "usage_type": "name"}]} {"seq_id": "113853845", "text": "#!/usr/bin/python3\nimport datetime\nfrom datetime import datetime, timedelta\nimport argparse\nimport sys\nfrom lib.datacalc import datacalculator\nfrom lib.checktotal import videocounter\nfrom lib.youtubevideosearch import *\nfrom lib.downloadcomments import *\n\n#### VARIABLES #####\n\npAfter=None\npBefore=None\nptoken=\"\"\nstalkerlimit=10000\n\n##### ARGS DATA FORMAT AND EXISTENCE VALIDATION ######\n\ndef validate_date(date):\n try:\n return datetime.strptime(date, \"%Y-%m-%d\").date()\n except ValueError:\n msg=\"Not a valid date: '{0}'\".format(date)\n raise argparse.ArgumentTypeError(msg)\ndef validate_vorder(order):\n orders=[\"date\",\"rating\",\"relevance\",\"title\",\"videoCount\",\"viewCount\"]\n if order in orders:\n return order\n else:\n msg=ValueError(\"Invalid video order option.\")\n raise argparse.ArgumentTypeError(msg)\ndef validate_corder(order):\n orders=[\"time\",\"relevance\"]\n if order in orders:\n return order\n else:\n msg=ValueError(\"Invalid comment order option.\")\n raise argparse.ArgumentTypeError(msg)\n\n\nif __name__ == '__main__':\n\n ########### ARGPARSE #############\n parser = argparse.ArgumentParser(prog='Youtube Stalker v.0.1', usage='%(prog)s [options]',description='Youtube Stalker is a simple 
python tool to help you monitor YouTube video comments about a topic of your interest and generate output that can be used for data analytics and intelligence.',epilog='Usage example: youstalker.py -a 2018-12-01 -b 2018-12-31 -q \"cats\"')\n    parser.add_argument('-q', '--query', help='Keyword - video search argument', action='store', required=True)\n    parser.add_argument('-a', '--after', help='Published after - video search argument. Format YYYY-MM-DD', action=\"store\", type=validate_date)\n    parser.add_argument('-b', '--before', help='Published before - video search argument. Format YYYY-MM-DD', action=\"store\", type=validate_date)\n    parser.add_argument('-l', '--video-limit', help='Video results limit. (default: 500). Maximum = {0}'.format(stalkerlimit), type=int, default=500)\n    parser.add_argument('-o', '--video-order', help='Possible orders: date, rating, relevance, title, videoCount, viewCount. (default: viewCount)', default='viewCount', action=\"store\", type=validate_vorder)\n    parser.add_argument('-L', '--comment-limit', help='Comments limit for each video. (default: all comments)', type=int)\n    parser.add_argument('-O', '--comment-order', help='Possible orders: time, relevance. (default: time)', default='time', type=validate_corder)\n    args = parser.parse_args()\n\n    ########### ARGS VALUES VALIDATION ##########\n    if args.video_limit > stalkerlimit:\n        print(\"Stalker's limit is {0} videos for video search. Quitting...\".format(stalkerlimit))\n        sys.exit(1)\n    if args.after and args.before:\n        diff = args.after - args.before\n        if diff.days > 0:\n            print(\"--before and --after range is invalid!\")\n            sys.exit(1)\n    if (args.video_limit > 500 and (args.after is None or args.before is None)):\n        print(\"The YouTube v3 API has an embedded video search limit of 500. \\nIf you provide publishedAfter and publishedBefore values \\nYoustalker will calculate the time range and split your \\nquery into small queries to bypass this restriction and bring you up to 10000 video results.\")\n        sys.exit(1)\n    if args.after:\n        pAfter = (str(args.after) + \"T00:00:01Z\")\n    if args.before:\n        pBefore = (str(args.before) + \"T23:59:59Z\")\n\n    ######### STALKER USE CASES ############\n\n    if (pAfter is not None and pBefore is not None and args.video_limit > 500):\n        print(\"Wait... I'm working on your request...\")\n        videototal = videocounter(args.query, pAfter, pBefore, ptoken, args.video_order)\n        days = datacalculator(pAfter, pBefore, videototal)\n        videos = youtube_searchdate(args.query, pAfter, pBefore, ptoken, args.video_order, days, args.video_limit, videototal)\n        pega_comments(videos, args.comment_order, args.comment_limit)\n    else:\n        print(\"Wait... 
I'm working on your request...\")\n videos=youtube_search(args.query,pAfter,pBefore,ptoken,args.video_order,args.video_limit)\n pega_comments(videos,args.comment_order,args.comment_limit)\n\n", "sub_path": "youstalker.py", "file_name": "youstalker.py", "file_ext": "py", "file_size_in_byte": 4194, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 25, "usage_type": "call"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 32, "usage_type": "call"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 39, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 63, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 66, "usage_type": "call"}, {"api_name": "lib.checktotal.videocounter", "line_number": 76, "usage_type": "call"}, {"api_name": "lib.datacalc.datacalculator", "line_number": 77, "usage_type": "call"}]} {"seq_id": "93920530", "text": "import time\nimport io\n\ndef parseFastaFile1(infile, db):\n with open(infile, 'r') as inFile:\n for line in inFile:\n if line[0] == \">\":\n ident, desc = line[1:].strip().split(maxsplit=1)\n db.append({'id': ident, 'description': desc, 'sequence': \"\", 'raw': line})\n else:\n db[-1]['sequence'] += line.rstrip()\n db[-1]['raw'] += line\n\n\ndef parseFastaFile2(infile, db):\n with open(infile, \"rb\") as inFile:\n for line in inFile:\n if line[0] == 62: # bin token for \">\"\n ident, desc = line[1:].strip().split(maxsplit=1)\n db.append({'id': ident,\n 'description': desc,\n 'sequence': bytearray(),\n 'raw': bytearray(line)})\n else:\n db[-1]['sequence'] += (line.rstrip())\n db[-1]['raw'] += (line)\n\n# version using StringIO\ndef parseFastaFile3(infile, db):\n with open(infile, \"r\") as inFile:\n for line in inFile:\n if line[0] == \">\":\n ident, desc = line[1:].strip().split(maxsplit=1)\n db.append({'id': ident,\n 'description': desc,\n 'sequence': io.StringIO(),\n 'raw': io.StringIO(line)})\n else:\n db[-1]['sequence'].write(line.rstrip())\n db[-1]['raw'].write(line)\n\n\n# version using BytesIO\ndef parseFastaFile4(infile, db):\n with open(infile, \"rb\") as inFile:\n for line in inFile:\n if line[0] == 62: # bin token for \">\"\n ident, desc = line[1:].strip().split(maxsplit=1)\n db.append({'id': ident,\n 'description': desc,\n 'sequence': io.BytesIO(),\n 'raw': io.BytesIO(line)})\n else:\n db[-1]['sequence'].write(line.rstrip())\n db[-1]['raw'].write(line)\n\n# Stop time of function calls\ndef stop_time(func, *args, **kargs):\n start = time.time()\n\n result = func(*args, **kargs)\n\n end = time.time()\n print(\"Function {} takes {:.3} seconds\".format(func.__name__, end - start))\n return result\n\nd = []\n\nres1 = stop_time(parseFastaFile1, \"../examples/long.fasta\", db=d)\nres2 = stop_time(parseFastaFile2, \"../examples/long.fasta\", db=d)\nres3 = stop_time(parseFastaFile3, \"../examples/long.fasta\", db=d)\nres4 = stop_time(parseFastaFile4, \"../examples/long.fasta\", db=d)\n\n# Compare both sequences\nprint(d[0]['sequence'] == d[1]['sequence'].decode())\nprint(d[0]['sequence'] == d[2]['sequence'].getvalue())\nprint(d[0]['sequence'] == d[3]['sequence'].getvalue().decode())", "sub_path": 
"genio0815/9B.py", "file_name": "9B.py", "file_ext": "py", "file_size_in_byte": 2742, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "io.StringIO", "line_number": 36, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 37, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 51, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 52, "usage_type": "call"}, {"api_name": "time.time", "line_number": 59, "usage_type": "call"}, {"api_name": "time.time", "line_number": 63, "usage_type": "call"}]} {"seq_id": "628810870", "text": "import os\nimport re\nimport socket\n\nimport trello\nfrom hugin.flowcells import Flowcell\nfrom flowcell_status import FlowcellStatus, FC_STATUSES\n\nFC_NAME_RE = r'(\\d{6})_([ST-]*\\w+\\d+)_\\d+_([AB]?)([A-Z0-9\\-]+)'\n\nCOLORS = [\n 'red',\n 'blue',\n 'green',\n 'yellow',\n 'orange',\n 'purple',\n 'lime',\n 'pink',\n 'sky',\n 'black'\n]\n\nclass FlowcellMonitor(object):\n def __init__(self, config):\n self._config = config\n # initialize None values for @property functions\n self._trello_board = None\n self._data_folders = None\n self._trello_cards = None\n self._trello_lists = None\n\n @property\n def config(self):\n return self._config\n\n @property\n def trello_board(self):\n if not self._trello_board:\n if not self.config.get('trello'):\n raise RuntimeError(\"'trello' must be in config file\")\n\n config = self.config.get('trello')\n # todo check if board exist\n\n api_key = config.get('api_key')\n token = config.get('token')\n api_secret = config.get('api_secret')\n client = trello.TrelloClient(api_key=api_key, token=token, api_secret=api_secret)\n board_id = config.get('board_id')\n self._trello_board = client.get_board(board_id)\n\n return self._trello_board\n\n @property\n def data_folders(self):\n if not self._data_folders:\n self._data_folders = self.config.get('data_folders')\n if self._data_folders is None:\n raise RuntimeError(\"'data_folders' must be in config file\")\n return self._data_folders\n\n @property\n def trello_cards(self):\n if self._trello_cards is None:\n self._trello_cards = self.trello_board.all_cards()\n return self._trello_cards\n\n @property\n def trello_lists(self):\n if self._trello_lists is None:\n self._trello_lists = self.trello_board.all_lists()\n return self._trello_lists\n\n def update_trello_board(self):\n for data_folder in self.data_folders:\n self._check_running_flowcells(data_folder)\n self._check_nosync_flowcells(data_folder)\n # move deleted flowcells to the archive list\n self._check_archived_flowcells(data_folder)\n\n def _check_running_flowcells(self, data_folder):\n # go through subfolders\n subfolders = filter(os.path.isdir, [os.path.join(data_folder, fc_path) for fc_path in os.listdir(data_folder)])\n for flowcell_path in subfolders:\n # skip non-flowcell folders\n if not re.match(FC_NAME_RE, os.path.basename(flowcell_path)):\n continue\n\n status = FlowcellStatus(flowcell_path)\n # depending on the type, return instance of related class (hiseq, hiseqx, miseq, etc)\n flowcell = Flowcell.init_flowcell(status)\n if flowcell.check_status():\n # todo: add comment\n # todo: if comment has been added\n pass\n # update flowcell on trello board\n self._update_card(flowcell)\n\n def _check_nosync_flowcells(self, data_folder):\n # check nosync folder\n nosync_folder = os.path.join(data_folder, 'nosync')\n if os.path.exists(nosync_folder):\n # move flowcell to nosync list\n for nosync_flowcell in 
os.listdir(nosync_folder):\n                flowcell_path = os.path.join(nosync_folder, nosync_flowcell)\n                # skip non-flowcell folders\n                if not re.match(FC_NAME_RE, os.path.basename(flowcell_path)):\n                    continue\n                card = self._get_card_by_name(nosync_flowcell)\n                # if the card is not on Trello board, create it\n                if card is None:\n                    status = FlowcellStatus(flowcell_path)\n                    flowcell = Flowcell.init_flowcell(status)\n                    self._update_card(flowcell)\n                else:\n                    nosync_list = self._get_list_by_name(FC_STATUSES['NOSYNC'])\n                    card.change_list(nosync_list.id)\n\n    def _check_archived_flowcells(self, data_folder):\n        # if the nosync folder exists\n        if os.path.exists(os.path.join(data_folder, FC_STATUSES['NOSYNC'].lower())):\n            # get cards from the nosync list\n            for card in self._get_cards_by_list(FC_STATUSES['NOSYNC']):\n                localhost = socket.gethostname()\n                # if the flowcell belongs to the server\n                if localhost in card.description:\n                    # check if the flowcell has been deleted from the nosync folder\n                    if card.name not in os.listdir(os.path.join(data_folder, FC_STATUSES['NOSYNC'].lower())):\n                        archived_list = self._get_list_by_name(FC_STATUSES['ARCHIVED'])\n                        card.change_list(archived_list.id)\n\n    def _update_card(self, flowcell):\n        # todo: beautify the method\n        trello_card = self._get_trello_card(flowcell)  # None if the card does not exist yet\n        flowcell_list = self._get_list_by_name(flowcell.trello_list)\n\n        # if the card is not on the trello board yet\n        if trello_card is None:\n            return self._create_card(flowcell)\n        else:\n            # skip aborted list\n            if flowcell.trello_list == FC_STATUSES['ABORTED']:\n                return trello_card\n            # if card is in the wrong list\n            if trello_card.list_id != flowcell_list.id:\n                # move card\n                trello_card.change_list(flowcell_list.id)\n\n            # if card is in the right list\n            else:\n                # todo: checkstatus -> taking too long?\n                return trello_card\n\n        # update due_time\n        trello_card.set_due(flowcell.due_time)\n        if flowcell.trello_list == FC_STATUSES['CHECKSTATUS']:\n            trello_card.comment(flowcell.status.warning)\n        return trello_card\n\n    def _create_card(self, flowcell):\n        trello_list = self._get_list_by_name(flowcell.trello_list)\n        if not trello_list:\n            raise RuntimeError('List {} cannot be found in TrelloBoard {}'.format(flowcell.trello_list, self.trello_board))\n\n        trello_card = trello_list.add_card(name=flowcell.full_name, desc=flowcell.get_formatted_description())\n        if flowcell.trello_list == FC_STATUSES['CHECKSTATUS']:\n            trello_card.comment(flowcell.status.warning)\n        trello_card.set_due(flowcell.due_time)\n        self._add_label(trello_card, flowcell)\n\n    def _add_label(self, card, flowcell):\n        server = flowcell.server\n        label = self._get_label_by_name(server)\n        if label is None:\n            color = self._get_next_color()\n            label = self.trello_board.add_label(name=server, color=color)\n        if label.id not in [label.id for label in card.labels]:\n            card.add_label(label)\n\n    def _get_label_by_name(self, name):\n        labels = self.trello_board.get_labels()\n        for label in labels:\n            if label.name == name:\n                return label\n        return None\n\n    def _get_list_by_name(self, list_name):\n        for item in self.trello_lists:\n            if item.name == list_name:\n                return item\n        return None\n\n    def _get_cards_by_list(self, list_name):\n        trello_list = self._get_list_by_name(list_name)\n        result = []\n        for card in self.trello_cards:\n            if card.list_id == trello_list.id:\n                result.append(card)\n        return result\n\n    def _get_card_by_name(self, card_name):\n        for card in self.trello_cards:\n            if card.name == card_name:\n                return card\n\n    def _get_trello_card(self, flowcell):\n        for card in self.trello_cards:\n            if flowcell.full_name == 
card.name:\n                return card\n        return None\n\n    def _get_next_color(self):\n        labels = self.trello_board.get_labels()\n        colors = [label.color for label in labels] if labels else []\n        # if all colors are used take the first one\n        if colors == COLORS:\n            return COLORS[0]\n\n        # if not all the colors are used, take the one which is not used\n        elif set(colors) != set(COLORS):\n            for color in COLORS:\n                if color not in colors:\n                    return color\n\n        else: # set(colors) == set(COLORS):\n            # otherwise take the color which is used the least\n            color_groups = {} # how many times each color has been used already\n            for color in COLORS:\n                color_groups[color] = colors.count(color)\n\n            for color, count in color_groups.items():\n                if count == min(color_groups.values()):\n                    return color", "sub_path": "hugin/flowcell_monitor.py", "file_name": "flowcell_monitor.py", "file_ext": "py", "file_size_in_byte": 8773, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "trello.TrelloClient", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 84, "usage_type": "call"}, {"api_name": "re.match", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flowcell_status.FlowcellStatus", "line_number": 90, "usage_type": "call"}, {"api_name": "hugin.flowcells.Flowcell.init_flowcell", "line_number": 92, "usage_type": "call"}, {"api_name": "hugin.flowcells.Flowcell", "line_number": 92, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "flowcell_status.FlowcellStatus", "line_number": 113, "usage_type": "call"}, {"api_name": "hugin.flowcells.Flowcell.init_flowcell", "line_number": 114, "usage_type": "call"}, {"api_name": "hugin.flowcells.Flowcell", "line_number": 114, "usage_type": "name"}, {"api_name": "flowcell_status.FC_STATUSES", "line_number": 117, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 122, "usage_type": "call"}, {"api_name": "flowcell_status.FC_STATUSES", "line_number": 122, "usage_type": "name"}, {"api_name": "flowcell_status.FC_STATUSES", "line_number": 124, "usage_type": "name"}, {"api_name": "socket.gethostname", "line_number": 125, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": 
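The _get_next_color logic just fixed above reduces to a single min over usage counts; the sketch below shows the equivalent with collections.Counter. It is an illustration added here, not part of the original module, and it assumes the same COLORS list.

from collections import Counter

COLORS = ['red', 'blue', 'green', 'yellow', 'orange',
          'purple', 'lime', 'pink', 'sky', 'black']

def next_color(used_colors):
    counts = Counter(used_colors)
    # unused colours count as zero and win immediately;
    # ties resolve to the earliest colour in COLORS
    return min(COLORS, key=lambda c: counts[c])

assert next_color([]) == 'red'
assert next_color(['red', 'blue']) == 'green'
assert next_color(COLORS + ['red']) == 'blue'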
"flowcell_status.FC_STATUSES", "line_number": 129, "usage_type": "name"}, {"api_name": "flowcell_status.FC_STATUSES", "line_number": 130, "usage_type": "name"}, {"api_name": "flowcell_status.FC_STATUSES", "line_number": 143, "usage_type": "name"}, {"api_name": "flowcell_status.FC_STATUSES", "line_number": 157, "usage_type": "name"}, {"api_name": "flowcell_status.FC_STATUSES", "line_number": 167, "usage_type": "name"}]} {"seq_id": "566557362", "text": "import os.path as op\nimport glob\nimport numpy as np\nimport pandas as pd\nimport subprocess\nimport spacy\nfrom wordfreq import zipf_frequency as word_frequency\nimport subprocess\ntry:\n spacy.load('nl_core_news_sm')\nexcept:\n process = subprocess.Popen('python -m spacy download nl_core_news_sm'.split(), stdout=subprocess.PIPE)\n output, error = process.communicate()\n\n# Setup paths and file names\nthis_path = '/' + op.join(*op.realpath(__file__).split('/')[:-1])\n\n\ndef setup_datapath():\n fname_datapath = '/' + this_path + '/data_path.txt'\n\n if not op.isfile(fname_datapath):\n data_path = input(\"Enter data path (or create a data_path.txt):\")\n if data_path[-1] != '/':\n data_path += '/'\n with open(fname_datapath, 'w') as f:\n f.write(data_path)\n with open(fname_datapath, 'r') as f:\n data_path = f.read().strip('\\n')\n return data_path\n\n\ndef setup_logfiles():\n fname_logfiles = '/' + this_path + '/log_files.csv'\n data_path = setup_datapath()\n if not op.isfile(fname_logfiles):\n tasks = dict(visual='Vis', auditory='Aud')\n log_files = list()\n\n for task in ('visual', 'auditory'):\n\n log_path = data_path + 'sourcedata/meg_task/'\n files = glob.glob(log_path + \"*-MEG-MOUS-%s*.log\" % tasks[task])\n\n for file in np.sort(files):\n subject = file.split('-')[0].split('/')[-1]\n log_files.append(dict(\n subject=int(subject[1:]),\n task=task,\n log_id=int(file.split('-')[1]),\n log_file=op.join('sourcedata', 'meg_task',\n file.split('/')[-1]),\n meg_file=op.join('sub-' + subject, 'meg',\n 'sub-%s_task-%s_meg.ds' % (subject, task))\n )\n )\n log_files = pd.DataFrame(log_files)\n # Remove corrupted log\n log_files = log_files.loc[(log_files.subject != 1006) &\n (log_files.subject != 1017)]\n log_files.to_csv(fname_logfiles)\n return pd.read_csv(fname_logfiles)\n\n\ndef setup_stimuli():\n fname_stimuli = '/' + this_path + '/stimuli.csv'\n data_path = setup_datapath()\n if not op.isfile(fname_stimuli):\n\n source = op.join(data_path, 'stimuli', 'stimuli.txt')\n\n with open(source, 'r') as f:\n stimuli = f.read()\n while ' ' in stimuli:\n stimuli.replace(' ', ' ')\n\n # clean up\n stimuli = stimuli.split('\\n')[:-1]\n stim_id = [int(s.split(' ')[0]) for s in stimuli]\n sequences = [' '.join(s.split(' ')[1:]) for s in stimuli]\n stimuli = pd.DataFrame([dict(index=idx, sequence=seq)\n for idx, seq in zip(stim_id, sequences)])\n stimuli.to_csv(fname_stimuli, index=False)\n return pd.read_csv(fname_stimuli, index_col='index')\n\n\ndef setup_morphemes():\n # download\n commands = ('polyglot download morph2.en morph2.ar',\n 'polyglot download morph2.nl')\n for command in commands:\n subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n\n\ndata_path = setup_datapath()\nlog_files = setup_logfiles()\nstimuli = setup_stimuli()\n\n\ndef _parse_log(log_fname):\n with open(log_fname, 'r') as f:\n text = f.read()\n\n # Fixes broken inputs\n text = text.replace('.\\n', '.')\n\n # file is made of two blocks\n block1, block2 = text.split('\\n\\n\\n')\n\n # read first header\n header1 = block1.replace(' ', '_').split('\\n')[3].split('\\t')\n 
header1[6] = 'time_uncertainty'\n header1[8] = 'duration_uncertainty'\n\n # read first data\n df1 = pd.DataFrame([s.split('\\t') for s in block1.split('\\n')][5:],\n columns=header1)\n # the two dataframe are only synced on certains rows\n common_samples = ('Picture', 'Sound', 'Nothing')\n sel = df1['Event_Type'].apply(lambda x: x in common_samples)\n index = df1.loc[sel].index\n\n # read second header\n header2 = block2.replace(' ', '_').split('\\n')[0].split('\\t')\n header2[7] = 'time_uncertainty'\n header2[9] = 'duration_uncertainty'\n\n # read second data\n df2 = pd.DataFrame([s.split('\\t') for s in block2.split('\\n')[2:-1]],\n columns=header2, index=index)\n\n # remove duplicate\n duplicates = np.intersect1d(df1.keys(), df2.keys())\n for key in duplicates:\n assert (df1.loc[index, key] == df2[key].fillna('')).all()\n df2.pop(key)\n\n log = pd.concat((df1, df2), axis=1)\n return log\n\n\ndef _clean_log(log):\n # Relabel condition: only applies to sample where condition changes\n translate = dict(\n ZINNEN='sentence',\n WOORDEN='word_list',\n FIX='fix',\n QUESTION='question',\n Response='response',\n ISI='isi',\n blank='blank',\n )\n for key, value in translate.items():\n sel = log.Code.astype(str).str.contains(key)\n log.loc[sel, 'condition'] = value\n log.loc[log.Code == '', 'condition'] = 'blank'\n\n # Annotate sequence idx and extend context to all trials\n start = 0\n block = 0\n context = 'init'\n log['new_context'] = False\n query = 'condition in (\"word_list\", \"sentence\")'\n for idx, row in log.query(query).iterrows():\n log.loc[start:idx, 'context'] = context\n log.loc[start:idx, 'block'] = block\n log.loc[idx, 'new_context'] = True\n context = row.condition\n block += 1\n start = idx\n log.loc[start:, 'context'] = context\n log.loc[start:, 'block'] = block\n\n # Format time\n log['time'] = 0\n idx = log.Time.str.isnumeric() == True # noqa\n log.loc[idx, 'time'] = log.loc[idx, 'Time'].astype(float) / 1e4\n\n # Extract individual word\n log.loc[log.condition.isna(), 'condition'] = 'word'\n idx = log.condition == 'word'\n words = log.Code.str.strip('0123456789 ')\n log.loc[idx, 'word'] = words.loc[idx]\n sel = log.query('word==\"\" and condition==\"word\"').index\n log.loc[sel, 'word'] = pd.np.nan\n log.loc[log.word.isna() & (log.condition == \"word\"), 'condition'] = 'blank'\n return log\n\n\ndef _add_stim_id(log, verbose):\n # Find beginning of each sequence (word list or sentence)\n start = 0\n sequence_pos = -1\n for idx, row in log.query('condition == \"fix\"').iterrows():\n if sequence_pos >= 0:\n log.loc[start:idx, 'sequence_pos'] = sequence_pos\n sequence_pos += 1\n start = idx\n log.loc[start:, 'sequence_pos'] = sequence_pos\n\n # Find corresponding stimulus id\n stim_id = 0\n lower30 = lambda s: s[:30].lower() # noqa\n stimuli['first_30_chars'] = stimuli.sequence.apply(lower30)\n sel = slice(0, 0)\n for pos, row in log.groupby('sequence_pos'):\n if pos == -1:\n continue\n\n # select words in this sequence\n sel = row.condition == \"word\"\n if not sum(sel):\n continue\n\n # match with stimuli\n first_30_chars = ' '.join(row.loc[sel, 'word'])[:30].lower() # noqa\n stim_id = stimuli.query('first_30_chars == @first_30_chars').index\n assert len(stim_id) == 1\n stim_id = stim_id[0]\n\n n_words = len(stimuli.loc[stim_id, 'sequence'].split(' '))\n if (n_words != sum(sel)) and verbose:\n print('mistach of %i words in %s (stim %i)' % (n_words - sum(sel),\n pos, stim_id))\n print('stim: %s' % stimuli.loc[stim_id, 'sequence'])\n print('log: %s' % ' '.join(row.loc[sel, 
'word']))\n\n # Update\n log.loc[row.index, 'stim_id'] = stim_id\n return log\n\n\ndef read_log(log_fname, task='auto', verbose=False):\n log = _parse_log(log_fname)\n log = _clean_log(log)\n if task == 'auto':\n task = 'visual' if log_fname[-7:] == 'Vis.log' else 'auditory'\n if task == 'visual':\n # TODO: add sequence annotation for auditory\n log = _add_stim_id(log, verbose=verbose)\n return log\n\n\ndef get_log_times(log, events, sfreq):\n sel = np.sort(np.r_[\n np.where(events[:, 2] == 20)[0], # fixation\n np.where(events[:, 2] == 10)[0] # context\n ])\n common_megs = events[sel]\n common_logs = log.query('(new_context == True) or condition==\"fix\"')\n\n last_log = common_logs.time[0]\n last_meg = common_megs[0, 0]\n last_idx = 0\n assert len(common_megs) == len(common_logs)\n for common_meg, (idx, common_log) in zip(\n common_megs, common_logs.iterrows()):\n\n if common_meg[2] == 20:\n assert common_log.condition == 'fix'\n else:\n assert common_log.condition in ('sentence', 'word_list')\n\n log.loc[idx, 'meg_time'] = common_meg[0] / sfreq\n\n sel = slice(last_idx+1, idx)\n times = log.loc[sel, 'time'] - last_log + last_meg / sfreq\n assert np.all(np.isfinite(times))\n log.loc[sel, 'meg_time'] = times\n\n last_log = common_log.time\n last_meg = common_meg[0]\n last_idx = idx\n\n assert np.isfinite(last_log) * np.isfinite(last_meg)\n\n # last block\n sel = slice(last_idx+1, None)\n times = log.loc[sel, 'time'] - last_log + last_meg / sfreq\n log.loc[sel, 'meg_time'] = times\n log['meg_sample'] = np.array(log.meg_time.values * sfreq, int)\n return log\n\n\ndef match_list(A, B, on_replace='delete'):\n \"\"\"Match two lists of different sizes and return corresponding indice\n Parameters\n ----------\n A: list | array, shape (n,)\n The values of the first list\n B: list | array: shape (m, )\n The values of the second list\n Returns\n -------\n A_idx : array\n The indices of the A list that match those of the B\n B_idx : array\n The indices of the B list that match those of the A\n \"\"\"\n from Levenshtein import editops\n\n unique = np.unique(np.r_[A, B])\n label_encoder = dict((k, v) for v, k in enumerate(unique))\n\n def int_to_unicode(array):\n return ''.join([str(chr(label_encoder[ii])) for ii in array])\n\n changes = editops(int_to_unicode(A), int_to_unicode(B))\n B_sel = np.arange(len(B)).astype(float)\n A_sel = np.arange(len(A)).astype(float)\n for type, val_a, val_b in changes:\n if type == 'insert':\n B_sel[val_b] = np.nan\n elif type == 'delete':\n A_sel[val_a] = np.nan\n elif on_replace == 'delete':\n # print('delete replace')\n A_sel[val_a] = np.nan\n B_sel[val_b] = np.nan\n elif on_replace == 'keep':\n # print('keep replace')\n pass\n else:\n raise NotImplementedError\n B_sel = B_sel[np.where(~np.isnan(B_sel))]\n A_sel = A_sel[np.where(~np.isnan(A_sel))]\n assert len(B_sel) == len(A_sel)\n return A_sel.astype(int), B_sel.astype(int)\n\n\ndef add_part_of_speech(df):\n sentences = ' '.join(df['word'].values)\n while ' ' in sentences:\n sentences = sentences.replace(' ', ' ')\n\n nlp = spacy.load(\"nl_core_news_sm\")\n doc = [i for i in nlp(sentences)]\n from_idx, to_idx = match_list([i.text for i in doc],\n df.word.values)\n\n part_of_speech = [doc[i].pos_ for i in from_idx]\n idx = df.index.values[to_idx]\n df.loc[idx, 'pos'] = part_of_speech\n df.loc[df.pos == 'X', 'pos'] = pd.np.nan\n #categories = ['ADJ', 'ADP', 'ADV', 'CONJ', 'DET',\n # 'NOUN', 'NUM', 'PRON', 'PROPN', 'VERB']\n #df = df.join(pd.get_dummies(df.pos, columns=categories))\n df = 
df.join(pd.get_dummies(df.pos))\n if 'PROPN' not in df.keys():\n verb_idx = df.columns.get_loc(\"VERB\")\n df.insert(verb_idx, 'PROPN', 0)\n\n return df\n\n\ndef add_word_frequency(df):\n freq = df.word.apply(lambda word: word_frequency(word, 'nl')) # noqa\n df['word_freq'] = freq\n return df\n\n\ndef add_word_length(df):\n df['word_length'] = df.word.astype(str).apply(len)\n return df\n\n\ndef add_letter_count(df):\n word = df.word.str\n for letter in 'abcdefghijklmnopqrstuvwxyz':\n df[letter] = word.count(letter)\n return df\n\n\ndef read_mri_events(event_fname):\n \"\"\"This is needs to be enriched depending on the analysis\n \"\"\"\n # Read MRI events\n events = pd.read_csv(event_fname, sep='\\t')\n\n # Add context: sentence or word list?\n contexts = dict(WOORDEN='word_list', ZINNEN='sentence')\n for key, value in contexts.items():\n sel = events.value.str.contains(key)\n events.loc[sel, 'context'] = value\n events.loc[sel, 'condition'] = value\n\n # Clean up MRI event mess\n sel = ~events.context.isna()\n start = 0\n context = 'init'\n for idx, row in events.loc[sel].iterrows():\n events.loc[start:idx, 'context'] = context\n start = idx\n context = row.context\n events.loc[start:, 'context'] = context\n\n # Add event condition: word, blank, inter stimulus interval etc\n conditions = (('50', 'pulse'), ('blank', 'blank'), ('ISI', 'isi'))\n for key, value in conditions:\n sel = events.value == key\n events.loc[sel, 'condition'] = value\n\n events.loc[events.value.str.contains('FIX '), 'condition'] = 'fix'\n\n # Extract words from file\n sel = events.condition.isna()\n words = events.loc[sel, 'value'].apply(lambda s: s.strip('0123456789 '))\n events.loc[sel, 'word'] = words\n\n # Remove empty words\n sel = (events.word.astype(str).apply(len) == 0) & (events.condition.isna())\n events.loc[sel, 'word'] = pd.np.nan\n events.loc[sel, 'condition'] = 'blank'\n events.loc[~events.word.isna(), 'condition'] = 'word'\n\n # --- Add word frequency\n sel = events.condition == 'word'\n\n def get_word_freq(word):\n return word_frequency(word, 'en', wordlist='best', minimum=0.0)\n\n events.loc[sel, 'word_freq'] = events.loc[sel, 'word'].apply(get_word_freq)\n\n # --- Add word length\n sel = events.condition == 'word'\n events['word_length'] = events.word.astype(str).apply(len)\n\n # --- TODO Add whatever features may be relevant here\n\n return events\n", "sub_path": "code/meg_experiment/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 14009, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "spacy.load", "line_number": 10, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 12, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.realpath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": 
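To make the behaviour of the match_list helper defined above concrete, here is a small alignment example (it relies on the same python-Levenshtein dependency that match_list imports): tokens present in only one list are dropped, and the surviving indices pair up.

A = ['the', 'quick', 'brown', 'fox']
B = ['the', 'brown', 'fox', 'jumps']

a_idx, b_idx = match_list(A, B)
# 'quick' (only in A) and 'jumps' (only in B) are discarded; the rest align:
assert [A[i] for i in a_idx] == ['the', 'brown', 'fox']
assert [B[j] for j in b_idx] == ['the', 'brown', 'fox']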
"os.path", "line_number": 51, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 81, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 84, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 92, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 92, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 133, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 138, "usage_type": "call"}, {"api_name": "pandas.np", "line_number": 185, "usage_type": "attribute"}, {"api_name": "numpy.sort", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 245, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 302, "usage_type": "attribute"}, {"api_name": "Levenshtein.editops", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 313, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 315, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 318, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 319, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 326, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 326, "usage_type": "call"}, {"api_name": "spacy.load", "line_number": 336, "usage_type": "call"}, {"api_name": "pandas.np", "line_number": 344, "usage_type": "attribute"}, {"api_name": "pandas.get_dummies", "line_number": 348, "usage_type": "call"}, {"api_name": "wordfreq.zipf_frequency", "line_number": 357, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 378, "usage_type": "call"}, {"api_name": "pandas.np", "line_number": 412, "usage_type": "attribute"}, {"api_name": "wordfreq.zipf_frequency", "line_number": 420, "usage_type": "call"}]} {"seq_id": "368297327", "text": "from django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\n\nimport logging\nlogger 
= logging.getLogger(__name__)\n\nfrom notifications.models import Subscription\n\nFROM_EMAIL = getattr(settings, 'NOTIFICATION_FROM_EMAIL', 'notifications@wheresyourtrash.com')\n\nclass Command(BaseCommand):\n help = 'Run through subscriptions sending out notifications'\n\n def handle(self, *args, **options):\n # Gather all subscriptions that need to go out\n #subs = Subscription.objects.live()\n for sub in Subscription.objects.all():\n if sub.day_of_pickup:\n logger.info('Sending notification to {0}'.format(sub.user))\n pickup_type = sub.district.district_type.lower() # + '_notification'\n if sub.subscription_type == 'SMS':\n recipient = sub.clean_phone_number + '@' + sub.service_provider.email_root\n else:\n recipient = sub.user.email\n\n if sub.district.district_type == 'TRASH':\n pickup_str = 'trash'\n else:\n pickup_str = 'recycling'\n txt_content = 'Is your {0} outside and ready? The folks from {1} are going to be picking it up later today!!'.format(pickup_str, sub.district.municipality.name)\n email = EmailMessage(\"Where's Your Trash?\", txt_content, FROM_EMAIL, [recipient])\n email.send()\n\n '''\n send_templated_mail(template_name=template_name,\n from_email=FROM_EMAIL,\n recipient_list=[recipient],\n context={'subscription':sub})\n '''\n\n", "sub_path": "wheresyourtrash/apps/notifications/management/commands/send_notifications.py", "file_name": "send_notifications.py", "file_ext": "py", "file_size_in_byte": 1795, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 10, "usage_type": "argument"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 12, "usage_type": "name"}, {"api_name": "notifications.models.Subscription.objects.all", "line_number": 18, "usage_type": "call"}, {"api_name": "notifications.models.Subscription.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "notifications.models.Subscription", "line_number": 18, "usage_type": "name"}, {"api_name": "django.core.mail.EmailMessage", "line_number": 32, "usage_type": "call"}]} {"seq_id": "142195988", "text": "from django.contrib import admin\n\nfrom django.contrib.admin import AdminSite\nfrom django.http import HttpResponse, HttpResponseRedirect\n\nfrom .models import Category,Item,SalesMeasure,Presentation,OrderItems,Order,Customer,Payment,Shop\nfrom django.shortcuts import render,redirect\nfrom django.contrib.admin import *\n\nfrom GorceryDelivery.forms import OrderDisplayForm\nfrom GorceryDelivery.modules.PDFGenerator import *\nfrom GorceryDelivery.modules.AggregateOrderPDF import *\nfrom datetime import datetime\n\nclass MyAdminSite(AdminSite):\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n allows complete inheritance from the default admin: default registration of USER and GROUP in admin\n :param args:\n :param kwargs:\n \"\"\"\n super(MyAdminSite, self).__init__(*args, **kwargs)\n self._registry.update(site._registry) # PART 2\n\n\n\n def get_urls(self):\n from django.urls import path,re_path\n urls = super().get_urls()\n urls += [\n\n path('customer_orders/', self.admin_view(self.get_daily_orders)),\n path('customer_orders//', self.admin_view(self.display_daily_orders)),\n re_path(r'^download_single_order/(\\d+)/$', self.admin_view(self.download_single_order)),\n re_path(r'^customer_orders/details/(\\d+)/$', self.admin_view(self.display_order_details)),\n 
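The notification command above delivers text messages by mailing the carrier's email-to-SMS gateway rather than calling an SMS API. A self-contained sketch of that pattern; the gateway domain and number are illustrative, not real subscriber data.

from django.core.mail import EmailMessage

def send_sms_via_gateway(phone_number, gateway_domain, body,
                         from_email='notifications@wheresyourtrash.com'):
    # e.g. '5551234567' + '@' + 'vtext.com' reaches the phone as an SMS
    recipient = phone_number + '@' + gateway_domain
    EmailMessage("Where's Your Trash?", body, from_email, [recipient]).send()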
path('customer_orders/items_by_day/<str:delivery_date>/', self.admin_view(self.aggregate_daily_orders))\n            #path('customer_orders/print_all/<str:delivery_date>/', self.admin_view(self.print_orders_by_day))\n        ]\n        return urls\n\n\n    def get_daily_orders(self, request):\n        \"\"\"\n        View function: lets the admin choose a delivery date and shows all orders for it\n        :param request: Http request\n        :return: Http redirect to display page; if method is not 'POST', renders delivery date form\n        \"\"\"\n\n        if request.method == 'POST':\n            form = OrderDisplayForm(request.POST)\n            if form.is_valid():\n                cd = form.cleaned_data\n                next_url = '/admin/customer_orders/' + cd.get('date') + '/'\n                #assert False\n                return HttpResponseRedirect(next_url)  # redirect to the url of 'display daily orders'\n\n        else:\n            form = OrderDisplayForm()\n\n        return render(request, 'admin/get_orders.html', {'form': form})\n\n\n    def display_daily_orders(self, request, delivery_date):\n        \"\"\"\n        View function: actually displays the orders\n        :param request: Http request\n        :param delivery_date: delivery date to query\n        :return: renders a tabled list of all the orders\n        \"\"\"\n\n        date_error = False\n        # convert the date first\n        try:  # the date arrives as user input, so anticipate malformed values\n            objDate = datetime.strptime(delivery_date, '%d-%m-%Y')\n            delivery_date = objDate.strftime('%Y-%m-%d')\n        except (ValueError, TypeError):\n            delivery_date = None\n            date_error = True\n\n        date_orders = Order.objects.filter(delivery_date=delivery_date)\n\n        return render(request, 'admin/show_orders.html', {'orders': date_orders,\n                                                          'date': delivery_date,\n                                                          'date_error': date_error})\n\n\n    def download_single_order(self, request, order_Id):\n        \"\"\"\n        View function: downloads the user order from the 'display daily orders' list as pdf\n        :param request: Http request\n        :param order_Id: pk of the order in db\n        :return: pdf file as Http response\n        \"\"\"\n\n        bought_items = None\n        order_details = None\n\n        order_Id = int(order_Id)\n\n        try:\n            order_details = Order.objects.get(id=order_Id)\n        except Order.DoesNotExist:\n            order_details = None\n\n        bought_items = OrderItems.objects.filter(order=order_details)\n\n        if bought_items is not None:\n            the_report = PDFGenerator(request, order_details, bought_items)\n            return the_report.download_user_order()\n\n\n\n    def display_order_details(self, request, order_id):\n        \"\"\"\n        View function: displays the details of a selected customer order\n        :param request: Http request\n        :param order_id: pk of the selected order\n        :return: renders the details\n        \"\"\"\n\n        order_details = None\n        bought_items = None\n        order_id = int(order_id)\n\n        try:\n            order_details = Order.objects.get(id=order_id)\n        except Order.DoesNotExist:\n            order_details = None\n\n        bought_items = OrderItems.objects.filter(order=order_details)\n\n\n        return render(request, 'admin/display_order_details.html', {'order': order_details,\n                                                                    'bought_items': bought_items})\n\n\n\n    def aggregate_daily_orders(self, request, delivery_date):\n        \"\"\"\n        View function: downloads aggregated orders for a given date, showing each item and how many times it was ordered.\n        :param request: Http request\n        :param delivery_date: delivery date\n        :return: pdf file as http response\n        \"\"\"\n\n        date_error = False\n        try:\n            objDate = datetime.strptime(delivery_date, '%Y-%m-%d')\n            delivery_date = objDate.strftime('%Y-%m-%d')\n        except (ValueError, TypeError):\n            date_error = True\n\n        order_items_list = []\n\n        if not date_error:\n            order_list = Order.objects.filter(delivery_date=delivery_date)\n            for order in order_list:\n                item_list = OrderItems.objects.filter(order=order)\n                order_items_list.append(item_list)\n\n\n        daily_order = 
AggregateOrderPDF(order_items_list, delivery_date)\n        daily_order.compute_aggregate_list()\n        return daily_order.download_aggregate_order()\n\n        #assert False\n\n\n\n\n\nadmin_site = MyAdminSite()\n\n\n\n@admin.register(Item, site=admin_site)\nclass AdminItem(admin.ModelAdmin):\n    list_display = ('name', 'description', 'price', 'discounted_price', 'availability')\n    list_filter = ('category', 'availability')\n    search_fields = ('name',)\n    list_per_page = 10\n\n\n\n@admin.register(Category, site=admin_site)\nclass AdminCategory(admin.ModelAdmin):\n    list_display = ('name', 'description')\n    list_per_page = 10\n    search_fields = ('name',)\n\n\n\n@admin.register(Customer, site=admin_site)\nclass AdminCustomer(admin.ModelAdmin):\n    list_display = ('user_profile', 'gender', 'phone_no', 'preffered_delivery_addr')\n    list_per_page = 10\n\n\n\n@admin.register(Order, site=admin_site)\nclass AdminOrder(admin.ModelAdmin):\n    list_display = ('buyer_firstname', 'buyer_lastname', 'buyer_gender', 'buyer_email', 'buyer_phone_no',\n                   'order_date', 'paid_for', 'delivery_date', 'order_total', 'delivery_addr', 'customer')\n    list_per_page = 10\n    list_filter = ('buyer_gender', 'paid_for', 'order_date', 'delivery_date')\n    search_fields = ('buyer_firstname', 'buyer_lastname', 'buyer_phone_no', 'buyer_email')\n\n\n\n\n@admin.register(SalesMeasure, site=admin_site)\nclass AdminSalesMeasure(admin.ModelAdmin):\n    list_display = ('name', 'description', 'item', 'price')\n    list_per_page = 10\n    search_fields = ('item__name', 'name')\n    list_filter = ('item',)\n\n\n\n@admin.register(Presentation, site=admin_site)\nclass AdminPresentation(admin.ModelAdmin):\n    list_per_page = 10\n    list_display = ('name', 'description', 'item')\n\n\n\n@admin.register(Payment, site=admin_site)\nclass AdminPayment(admin.ModelAdmin):\n    list_display = ('order', 'payment_amount', 'timestamp', 'transaction_reference', 'status', 'description')\n    list_per_page = 10\n\n    def payment_amount(self, obj):\n        return obj.order.order_total\n\n\n\n@admin.register(OrderItems, site=admin_site)\nclass AdminOrderItem(admin.ModelAdmin):\n    list_display = ('order_item_desc', 'order_item_price', 'quantity', 'price_by_qty')\n    list_per_page = 10\n    list_filter = ('item', 'order')\n    search_fields = ('item__name', 'order__buyer_firstname')\n\n\n@admin.register(Shop, site=admin_site)\nclass AdminShop(admin.ModelAdmin):  # renamed so it no longer shadows the Shop model\n    list_display = ('shop_name', 'site_domain', 'address', 'email', 'phone_number', 'phone_number_2', 'is_active')\n", "sub_path": "GorceryDelivery/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 8253, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.contrib.admin.AdminSite", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 35, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 36, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}, {"api_name": "GorceryDelivery.forms.OrderDisplayForm", "line_number": 51, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 56, "usage_type": "call"}, {"api_name": "GorceryDelivery.forms.OrderDisplayForm", "line_number": 59, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 61, "usage_type": "call"}, {"api_name": 
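MyAdminSite above illustrates the general recipe for bolting custom pages onto Django's admin: subclass AdminSite, extend get_urls, and wrap each view in admin_view so the usual login and permission checks still run. A stripped-down sketch of that recipe (the extra view is illustrative only):

from django.contrib.admin import AdminSite
from django.http import HttpResponse
from django.urls import path

class ReportAdminSite(AdminSite):
    def get_urls(self):
        urls = super().get_urls()
        # appending works because the default catch-all only matches real app labels
        return urls + [path('hello/', self.admin_view(self.hello))]

    def hello(self, request):
        return HttpResponse('hello from a custom admin view')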
"datetime.datetime.strptime", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "name"}, {"api_name": "models.Order.objects.filter", "line_number": 81, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 81, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 81, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 83, "usage_type": "call"}, {"api_name": "models.Order.objects.get", "line_number": 102, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 102, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 102, "usage_type": "name"}, {"api_name": "models.OrderItems.objects.filter", "line_number": 106, "usage_type": "call"}, {"api_name": "models.OrderItems.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "models.OrderItems", "line_number": 106, "usage_type": "name"}, {"api_name": "models.Order.objects.get", "line_number": 127, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 127, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 127, "usage_type": "name"}, {"api_name": "models.OrderItems.objects.filter", "line_number": 131, "usage_type": "call"}, {"api_name": "models.OrderItems.objects", "line_number": 131, "usage_type": "attribute"}, {"api_name": "models.OrderItems", "line_number": 131, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 149, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 149, "usage_type": "name"}, {"api_name": "models.Order.objects.filter", "line_number": 157, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 157, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 157, "usage_type": "name"}, {"api_name": "models.OrderItems.objects.filter", "line_number": 159, "usage_type": "call"}, {"api_name": "models.OrderItems.objects", "line_number": 159, "usage_type": "attribute"}, {"api_name": "models.OrderItems", "line_number": 159, "usage_type": "name"}, {"api_name": "{'path': 'django.urls.path', 're_path': 'django.urls.re_path'}", "line_number": 173, "usage_type": "call"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 178, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 178, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 177, "usage_type": "call"}, {"api_name": "models.Item", "line_number": 177, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 177, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 187, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 187, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 186, "usage_type": "call"}, {"api_name": "models.Category", "line_number": 186, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 186, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 195, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 195, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 194, "usage_type": "call"}, {"api_name": "models.Customer", "line_number": 194, "usage_type": "argument"}, {"api_name": 
"django.contrib.admin", "line_number": 194, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 202, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 202, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 201, "usage_type": "call"}, {"api_name": "models.Order", "line_number": 201, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 201, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 213, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 213, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 212, "usage_type": "call"}, {"api_name": "models.SalesMeasure", "line_number": 212, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 212, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 222, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 222, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 221, "usage_type": "call"}, {"api_name": "models.Presentation", "line_number": 221, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 221, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 229, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 229, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 228, "usage_type": "call"}, {"api_name": "models.Payment", "line_number": 228, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 228, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 239, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 239, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 238, "usage_type": "call"}, {"api_name": "models.OrderItems", "line_number": 238, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 238, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 247, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 247, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 246, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 246, "usage_type": "name"}]} {"seq_id": "89331959", "text": "from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n\ndef main():\n chrome_options = Options()\n chrome_options.add_argument(\"--disable-extensions\")\n chrome_options.add_argument(\"--disable-gpu\")\n chrome_options.add_argument(\"--no-sandbox\")\n chrome_options.add_argument(\"--headless\")\n chrome_options.headless = True # also works\n driver = webdriver.Chrome(executable_path='/Users/jorgecorradi/Downloads/chromedriver', options=chrome_options)\n driver.get('https://www.infomoney.com.br/cotacoes/empresas-b3/')\n\n symbols = []\n for el in driver.find_elements_by_css_selector('.list-companies a'):\n symbols.append(el.text)\n\n with open('/Users/jorgecorradi/Desktop/stocks.csv', 'w') as csv_file:\n csv_file.write(\"symbol;companyName;stockType;sector;category;currency\" + \"\\n\")\n for symbol in symbols:\n driver.get('https://www.fundamentus.com.br/detalhes.php?papel=' + symbol)\n print(\"Scraping \" + symbol)\n\n try:\n data = 
{}\n                data['symbol'] = symbol\n\n                asset_type = driver.find_element_by_css_selector(\n                    'body > div.center > div.conteudo.clearfix > table:nth-child(2) > tbody > tr:nth-child(1) > td.label.w15 > span.txt').text\n\n                if asset_type == 'Papel':\n                    data['companyName'] = driver.find_element_by_css_selector(\n                        'body > div.center > div.conteudo.clearfix > table:nth-child(2) > tbody > tr:nth-child(3) > td:nth-child(2) > span').text\n\n                    data['stockType'] = driver.find_element_by_css_selector(\n                        'body > div.center > div.conteudo.clearfix > table:nth-child(2) > tbody > tr:nth-child(2) > td:nth-child(2) > span').text\n                    data['sector'] = driver.find_element_by_css_selector(\n                        'body > div.center > div.conteudo.clearfix > table:nth-child(2) > tbody > tr:nth-child(4) > td:nth-child(2) > span > a').text\n                    data['category'] = 'STOCK'\n                else:\n                    data['companyName'] = driver.find_element_by_css_selector(\n                        'body > div.center > div.conteudo.clearfix > table:nth-child(3) > tbody > tr:nth-child(2) > td:nth-child(2) > span').text\n\n                    data['stockType'] = 'FII'\n                    data['sector'] = driver.find_element_by_css_selector(\n                        'body > div.center > div.conteudo.clearfix > table:nth-child(3) > tbody > tr:nth-child(4) > td:nth-child(2) > span > a').text\n                    data['category'] = 'REF'\n\n                data['currency'] = 'BRL'\n\n                csv_file.write(\";\".join(data.values()) + \"\\n\")\n                csv_file.flush()\n            except Exception:\n                print(\"couldn't get info for \" + symbol)\n\n\nif __name__ == '__main__':\n    main()\n    print(\"Done Scraping\")\n", "sub_path": "stock_discover.py", "file_name": "stock_discover.py", "file_ext": "py", "file_size_in_byte": 2898, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 6, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 12, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 12, "usage_type": "name"}]} {"seq_id": "15193056", "text": "from flask_restful import Resource, reqparse, abort, fields, marshal_with\r\nfrom datetime import datetime\r\nfrom . import citizen_api\r\nfrom ..database_models import Citizen\r\nfrom .. import data_handler, sms_handler\r\nfrom app import db\r\n\r\n# Parser to check that required arguments are sent to add a citizen to the database\r\ncitizen_put_args = reqparse.RequestParser()\r\ncitizen_put_args.add_argument(\"id_citizen\", type=str, required=True,\r\n                              help=\"The ID of the citizen is required. The argument should be a String.\")\r\ncitizen_put_args.add_argument(\"email\", type=str, required=True,\r\n                              help=\"The email of the citizen is required. The argument should be a String.\")\r\ncitizen_put_args.add_argument(\"name\", type=str, required=True,\r\n                              help=\"The name of the citizen is required. The argument should be a String.\")\r\ncitizen_put_args.add_argument(\"surname\", type=str, required=True,\r\n                              help=\"The surname of the citizen is required. The argument should be a String.\")\r\ncitizen_put_args.add_argument(\"date_of_birth\", type=str, required=True,\r\n                              help=\"The date of birth of the citizen is required in the format YYYY-MM-DD. \"\r\n                                   \"The argument should be a String.\")\r\ncitizen_put_args.add_argument(\"mobile_num\", type=str, required=True,\r\n                              help=\"The mobile number of the citizen is required. 
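The scraper above leans on implicitly_wait plus bare element lookups; when pages load unevenly, an explicit wait per element is the usual hardening step. A hedged sketch with Selenium's WebDriverWait (the selector is whatever CSS path the caller already uses):

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def text_of(driver, css_selector, timeout=10):
    # blocks for up to `timeout` seconds until the element appears, then returns its text
    element = WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, css_selector)))
    return element.text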
The argument should be a String.\")\r\ncitizen_put_args.add_argument(\"medical_aid\", type=str,\r\n help=\"The medical aid number of the citizen should be a String.\")\r\ncitizen_put_args.add_argument(\"citizen_address\", type=str,\r\n help=\"The citizen address should be a String.\")\r\n\r\n# Parser to check that required arguments are sent to get a citizen from the database\r\ncitizen_get_args = reqparse.RequestParser()\r\ncitizen_get_args.add_argument(\"id_citizen\", type=str, required=True,\r\n help=\"The ID of the citizen is required. The argument should be a String.\")\r\n\r\n# Parser to check that required arguments are sent to update a citizen in the database\r\ncitizen_patch_args = reqparse.RequestParser()\r\ncitizen_patch_args.add_argument(\"id_citizen\", type=str, required=True,\r\n help=\"The ID of the citizen is required. The argument should be a String.\")\r\ncitizen_patch_args.add_argument(\"email\", type=str,\r\n help=\"The citizen email address should be a String.\")\r\ncitizen_patch_args.add_argument(\"name\", type=str,\r\n help=\"The citizen name should be a String.\")\r\ncitizen_patch_args.add_argument(\"surname\", type=str,\r\n help=\"The citizen surname should be a String.\")\r\ncitizen_patch_args.add_argument(\"date_of_birth\", type=str,\r\n help=\"The date of birth of the citizen is should be in the format YYYY-MM-DD. \"\r\n \"The argument should be a String.\")\r\ncitizen_patch_args.add_argument(\"mobile_num\", type=str,\r\n help=\"The mobile number of the citizen should be a String\")\r\ncitizen_patch_args.add_argument(\"medical_aid\", type=str,\r\n help=\"The medical aid number of the citizen should be a String.\")\r\ncitizen_patch_args.add_argument(\"citizen_address\", type=str,\r\n help=\"Address is required in string format\")\r\n\r\n# Parser to check that required arguments are sent to delete a citizen from the database\r\ncitizen_del_args = reqparse.RequestParser()\r\ncitizen_del_args.add_argument(\"id_citizen\", type=str, required=True,\r\n help=\"The ID of the citizen is required. 
The argument should be a String.\")\r\n\r\n# Fields to marshal the responses\r\nresource_fields = {\r\n 'id_citizen': fields.String,\r\n 'email': fields.String,\r\n 'name': fields.String,\r\n 'surname': fields.String,\r\n 'date_of_birth': fields.String,\r\n 'mobile_num': fields.String,\r\n 'medical_aid': fields.String,\r\n 'citizen_address': fields.String\r\n}\r\n\r\n\r\n# Class to handle methods related to citizens\r\nclass CitizenResource(Resource):\r\n @marshal_with(resource_fields)\r\n def get(self):\r\n args = citizen_get_args.parse_args()\r\n data_handler.clean_data(args)\r\n return get_citizen(args), 200\r\n\r\n def put(self):\r\n args = citizen_put_args.parse_args()\r\n data_handler.clean_data(args)\r\n add_citizen(args)\r\n return {\"message\": \"Added to database\"}, 201\r\n\r\n def patch(self):\r\n args = citizen_patch_args.parse_args()\r\n data_handler.clean_data(args)\r\n update_citizen(args)\r\n return {\"message\": \"Updated the database\"}, 200\r\n\r\n def delete(self):\r\n args = citizen_del_args.parse_args()\r\n data_handler.clean_data(args)\r\n delete_citizen(args)\r\n return {\"message\": \"Deleted from database\"}, 204\r\n\r\n\r\n# Add resource to the API\r\ncitizen_api.add_resource(CitizenResource, \"\")\r\n\r\n\r\n# Get a citizen by their ID and return the full database entry\r\ndef get_citizen(args):\r\n result = Citizen.query.filter_by(id_citizen=args[\"id_citizen\"]).first()\r\n if result:\r\n return result\r\n else:\r\n abort(404, message=\"A citizen with this ID does not exist\")\r\n\r\n\r\n# Add a citizen to the database\r\ndef add_citizen(args):\r\n result = Citizen.query.filter_by(id_citizen=args[\"id_citizen\"]).first()\r\n if result:\r\n abort(409, message=\"A citizen with this ID already exists\")\r\n else:\r\n try:\r\n date = datetime.strptime(args[\"date_of_birth\"], '%Y-%m-%d')\r\n new_citizen = Citizen(id_citizen=args[\"id_citizen\"], email=args[\"email\"], name=args[\"name\"],\r\n surname=args[\"surname\"], date_of_birth=date, mobile_num=args[\"mobile_num\"],\r\n medical_aid=args[\"medical_aid\"],\r\n citizen_address=args[\"citizen_address\"])\r\n db.session.add(new_citizen)\r\n db.session.commit()\r\n sms_handler.send_mock_sms(\"You have been added\", args[\"mobile_num\"])\r\n except ValueError:\r\n abort(400, message=\"The date should be in format YYYY-MM-DD\")\r\n\r\n\r\n# Update the citizen in the database\r\ndef update_citizen(args):\r\n result = Citizen.query.filter_by(id_citizen=args[\"id_citizen\"]).first()\r\n if not result:\r\n abort(404, message=\"Citizen with this ID does not exist, cannot update\")\r\n else:\r\n if args[\"email\"]:\r\n result.email = args[\"email\"]\r\n if args[\"name\"]:\r\n result.name = args[\"name\"]\r\n if args[\"surname\"]:\r\n result.surname = args[\"surname\"]\r\n if args[\"date_of_birth\"]:\r\n try:\r\n date = datetime.strptime(args[\"date_of_birth\"], '%Y-%m-%d')\r\n result.date_of_birth = date\r\n except ValueError:\r\n abort(400, message=\"The date should be in format YYYY-MM-DD\")\r\n if args[\"mobile_num\"]:\r\n result.mobile_num = args[\"mobile_num\"]\r\n if args[\"medical_aid\"]:\r\n result.medical_aid = args[\"medical_aid\"]\r\n if args[\"citizen_address\"]:\r\n result.citizen_address = args[\"citizen_address\"]\r\n db.session.commit()\r\n\r\n\r\n# Delete a citizen from the database\r\ndef delete_citizen(args):\r\n result = Citizen.query.filter_by(id_citizen=args[\"id_citizen\"]).first()\r\n if not result:\r\n abort(404, message=\"Citizen with this ID does not exist, cannot delete\")\r\n else:\r\n 
db.session.delete(result)\r\n db.session.commit()\r\n", "sub_path": "app/api/citizen/citizen.py", "file_name": "citizen.py", "file_ext": "py", "file_size_in_byte": 7705, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "flask_restful.reqparse.RequestParser", "line_number": 9, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 9, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 29, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 29, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 34, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 34, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 54, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 54, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 60, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 61, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 62, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 63, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 64, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 65, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 66, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 67, "usage_type": "name"}, {"api_name": "flask_restful.Resource", "line_number": 72, "usage_type": "name"}, {"api_name": "flask_restful.marshal_with", "line_number": 73, "usage_type": "call"}, {"api_name": "database_models.Citizen.query.filter_by", "line_number": 104, "usage_type": "call"}, {"api_name": "database_models.Citizen.query", "line_number": 104, "usage_type": "attribute"}, {"api_name": "database_models.Citizen", "line_number": 104, "usage_type": "name"}, {"api_name": "flask_restful.abort", "line_number": 108, "usage_type": "call"}, {"api_name": "database_models.Citizen.query.filter_by", "line_number": 113, "usage_type": "call"}, {"api_name": "database_models.Citizen.query", "line_number": 113, "usage_type": "attribute"}, {"api_name": "database_models.Citizen", "line_number": 113, "usage_type": "name"}, {"api_name": "flask_restful.abort", "line_number": 115, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 118, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 118, "usage_type": "name"}, {"api_name": "database_models.Citizen", "line_number": 119, "usage_type": "call"}, {"api_name": "app.db.session.add", "line_number": 123, "usage_type": "call"}, {"api_name": 
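Seen from the client side, the resource above takes form-encoded arguments that reqparse pulls apart. A sketch of exercising it with requests; the base URL and the ID value are hypothetical, since the blueprint prefix for citizen_api is defined elsewhere.

import requests

BASE = 'http://localhost:5000/citizen'  # hypothetical mount point for citizen_api

payload = {'id_citizen': '9001015009087', 'email': 'jane@example.com',
           'name': 'Jane', 'surname': 'Doe', 'date_of_birth': '1990-01-01',
           'mobile_num': '0821234567'}
print(requests.put(BASE, data=payload).status_code)   # 201 when newly created
print(requests.get(BASE, params={'id_citizen': '9001015009087'}).json())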
"app.db.session", "line_number": 123, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 123, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 124, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 124, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 124, "usage_type": "name"}, {"api_name": "flask_restful.abort", "line_number": 127, "usage_type": "call"}, {"api_name": "database_models.Citizen.query.filter_by", "line_number": 132, "usage_type": "call"}, {"api_name": "database_models.Citizen.query", "line_number": 132, "usage_type": "attribute"}, {"api_name": "database_models.Citizen", "line_number": 132, "usage_type": "name"}, {"api_name": "flask_restful.abort", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 144, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 144, "usage_type": "name"}, {"api_name": "flask_restful.abort", "line_number": 147, "usage_type": "call"}, {"api_name": "app.db.session.commit", "line_number": 154, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 154, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 154, "usage_type": "name"}, {"api_name": "database_models.Citizen.query.filter_by", "line_number": 159, "usage_type": "call"}, {"api_name": "database_models.Citizen.query", "line_number": 159, "usage_type": "attribute"}, {"api_name": "database_models.Citizen", "line_number": 159, "usage_type": "name"}, {"api_name": "flask_restful.abort", "line_number": 161, "usage_type": "call"}, {"api_name": "app.db.session.delete", "line_number": 163, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 163, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 163, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 164, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 164, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 164, "usage_type": "name"}]} {"seq_id": "600321219", "text": "# coding=UTF-8\n#่ฏปๅ–config.ini้…็ฝฎไฟกๆฏไธญ็š„ๆ•ฐๆฎ\n\nimport os\nimport codecs\nimport configparser\n#ๅฐ†่ฏปๅ–้…็ฝฎไฟกๆฏini็š„่ฟ‡็จ‹ๅฐ่ฃ…ๆˆไธ€ไธช็ฑป\nproDir = os.path.split(os.path.realpath(__file__))[0] #่ฏฅๆ–‡ไปถ็š„ๅœฐๅ€๏ผŒ่ฟ™้‡Œ็›ธๅฝ“ไบŽ่ฟ™้‡Œ็š„ๆŽฅๅฃๆต‹่ฏ•ๆ–‡ๆกฃๆก†ๆžถ่ทฏๅพ„,็ปๅฏน่ทฏๅพ„\nconfigPath = os.path.join(proDir, \"config.ini\") #ๅญ˜ๆ”พๅœจๆ น็›ฎๅฝ•ไธ‹ๆ–‡ไปถๅๆ˜ฏconfig.ini็š„ๆ–‡ไปถ๏ผŒ้…็ฝฎๆ–‡ไปถ็š„ๅฎŒๆ•ดๅœฐๅ€\n\nclass ReadConfig():\n def __init__(self):\n fd = open(configPath ,encoding=\"utf-8\") #ๆ‰“ๅผ€้…็ฝฎๆ–‡ไปถ\n data = fd.read() #่ฏปๅ–้…็ฝฎๆ–‡ไปถ\n\n # remove BOM #ๅˆคๆ–ญๆ–‡ไปถไธญๆ˜ฏๅฆๆœ‰ๆ•ฐๆฎ\n if data[:3] == codecs.BOM_UTF8:\n data = data[3:]\n file = codecs.open(configPath, \"w\")\n file.write(data)\n file.close()\n fd.close()\n\n self.cf = configparser.ConfigParser() #่ฐƒ็”จ่ฏปๅ–้…็ฝฎๆจกๅ—็š„็ฑป\n self.cf.read(configPath,encoding=\"utf-8\") #่ฏปๅ–ๆ–‡ไปถ\n\n#่Žทๅ–emallใ€httpใ€databaseๅˆ†็ป„ไธ‹ๆŒ‡ๅฎš็š„nameๅ€ผ\n def get_email(self, name):\n value = self.cf.get(\"EMAIL\", name)\n return value\n\n def get_http(self, name):\n value = self.cf.get(\"HTTP\", name)\n return value\n\n def get_headers(self, name):\n value = self.cf.get(\"HEADERS\", name)\n return value\n\n def set_headers(self, name, value):\n self.cf.set(\"HEADERS\", name, value)\n with open(configPath, 'w+') as f:\n self.cf.write(f)\n\n def get_url(self, name):\n value = self.cf.get(\"URL\", name)\n return value\n\n def 
get_db(self, name):\n        value = self.cf.get(\"DATABASE\", name)\n        return value\n\n\n# set the value of the given option under the HTTP section (not used at this stage)\n    def set_http(self, name, value):\n        self.cf.set(\"HTTP\", name, value)\n        with open(configPath, 'w') as fp:\n            self.cf.write(fp)", "sub_path": "readConfig.py", "file_name": "readConfig.py", "file_ext": "py", "file_size_in_byte": 1940, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "os.path.split", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "codecs.BOM_UTF8", "line_number": 17, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 19, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 24, "usage_type": "call"}]} {"seq_id": "546947584", "text": "import argparse\nimport csv\nimport math\nimport os\nimport os.path as osp\n\nimport joblib\nimport numpy as np\nimport src.ast_toolbox.mcts.BoundedPriorityQueues as BPQ\n# from example_save_trials import *\nimport tensorflow as tf\nfrom CartPole.cartpole_simulator import CartpoleSimulator\n# from garage.tf.algos.trpo import TRPO\nfrom garage.baselines.linear_feature_baseline import LinearFeatureBaseline\nfrom garage.misc import logger\nfrom garage.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy\nfrom src.ast_toolbox import TRPO\nfrom src.ast_toolbox import ASTEnv\nfrom src.ast_toolbox import TfEnv\nfrom src.ast_toolbox.rewards import ASTRewardS\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\" # just use CPU\n\n\n# Logger Params\nparser = argparse.ArgumentParser()\nparser.add_argument('--exp_name', type=str, default='cartpole')\nparser.add_argument('--nd', type=int, default=1)\nparser.add_argument('--sut_itr', type=int, default=5)\nparser.add_argument('--n_trial', type=int, default=10)\nparser.add_argument('--trial_start', type=int, default=0)\nparser.add_argument('--n_itr', type=int, default=1000)\nparser.add_argument('--step_size', type=float, default=5.0)\nparser.add_argument('--batch_size', type=int, default=2000) # 50000\nparser.add_argument('--snapshot_mode', type=str, default=\"none\")\nparser.add_argument('--snapshot_gap', type=int, default=500)\nparser.add_argument('--log_dir', type=str, default='./Data/AST/TRPO')\nparser.add_argument('--args_data', type=str, default=None)\nargs = parser.parse_args()\nargs.log_dir += ('B' + str(args.batch_size) + 'Step' + str(args.step_size))\n\ntop_k = 10\nmax_path_length = 100\nopen_loop = False\n\ntf.set_random_seed(0)\nsess = tf.Session()\nsess.__enter__()\n\n# Instantiate the env\ndata = joblib.load(\"../CartPole/ControlPolicy/itr_\" + str(args.sut_itr) + \".pkl\")\nsut = data['policy']\nreward_function = ASTRewardS()\n\nsimulator = CartpoleSimulator(sut=sut, max_path_length=100, use_seed=False, nd=args.nd)\nenv = TfEnv(ASTEnv(open_loop=open_loop,\n                   simulator=simulator,\n                   fixed_init_state=True,\n                   s_0=[0.0, 0.0, 0.0 * math.pi / 180, 0.0],\n                   reward_function=reward_function,\n                   ))\n\n# Create policy\npolicy = GaussianMLPPolicy(\n    name='ast_agent',\n    env_spec=env.spec,\n    hidden_sizes=(128, 64, 32)\n)\n\nwith open(osp.join(args.log_dir, 'total_result.csv'), mode='w') as csv_file:\n    fieldnames = ['step_count']\n    for i in range(top_k):\n        
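For reference, the ReadConfig class above reduces to the standard ConfigParser round trip; a sketch of reading one option and persisting a change (the HTTP/baseurl names are illustrative):

import configparser

cf = configparser.ConfigParser()
cf.read('config.ini', encoding='utf-8')
print(cf.get('HTTP', 'baseurl'))                 # read a value
cf.set('HTTP', 'baseurl', 'http://example.com')  # change it in memory
with open('config.ini', 'w', encoding='utf-8') as f:
    cf.write(f)                                  # persist it back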
fieldnames.append('reward ' + str(i))\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n\n for trial in range(args.trial_start, args.trial_start + args.n_trial):\n # Create the logger\n log_dir = args.log_dir + '/' + str(trial)\n\n tabular_log_file = osp.join(log_dir, 'process.csv')\n text_log_file = osp.join(log_dir, 'text.txt')\n params_log_file = osp.join(log_dir, 'args.txt')\n\n logger.set_snapshot_dir(log_dir)\n logger.set_snapshot_mode(args.snapshot_mode)\n logger.set_snapshot_gap(args.snapshot_gap)\n logger.log_parameters_lite(params_log_file, args)\n if trial > args.trial_start:\n old_log_dir = args.log_dir + '/' + str(trial - 1)\n logger.pop_prefix()\n logger.remove_text_output(osp.join(old_log_dir, 'text.txt'))\n logger.remove_tabular_output(osp.join(old_log_dir, 'process.csv'))\n logger.add_text_output(text_log_file)\n logger.add_tabular_output(tabular_log_file)\n logger.push_prefix(\"[\" + args.exp_name + '_trial ' + str(trial) + \"]\")\n\n np.random.seed(trial)\n\n params = policy.get_params()\n sess.run(tf.variables_initializer(params))\n baseline = LinearFeatureBaseline(env_spec=env.spec)\n # optimizer = ConjugateGradientOptimizer(hvp_approach=FiniteDifferenceHvp(base_eps=1e-5))\n\n top_paths = BPQ.BoundedPriorityQueue(top_k)\n algo = TRPO(\n env=env,\n policy=policy,\n baseline=baseline,\n batch_size=args.batch_size,\n step_size=args.step_size,\n n_itr=args.n_itr,\n store_paths=True,\n # optimizer= optimizer,\n max_path_length=max_path_length,\n top_paths=top_paths,\n plot=False,\n )\n\n algo.train(sess=sess, init_var=False)\n\n row_content = dict()\n row_content['step_count'] = args.n_itr * args.batch_size\n i = 0\n for (r, action_seq) in algo.top_paths:\n row_content['reward ' + str(i)] = r\n i += 1\n writer.writerow(row_content)\n", "sub_path": "examples/CartPoleNd/MultiCartPoleNd_TRPO.py", "file_name": "MultiCartPoleNd_TRPO.py", "file_ext": "py", "file_size_in_byte": 4598, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "os.environ", "line_number": 22, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.set_random_seed", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 47, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 51, "usage_type": "call"}, {"api_name": "src.ast_toolbox.rewards.ASTRewardS", "line_number": 53, "usage_type": "call"}, {"api_name": "CartPole.cartpole_simulator.CartpoleSimulator", "line_number": 55, "usage_type": "call"}, {"api_name": "src.ast_toolbox.TfEnv", "line_number": 56, "usage_type": "call"}, {"api_name": "src.ast_toolbox.ASTEnv", "line_number": 56, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 59, "usage_type": "attribute"}, {"api_name": "garage.tf.policies.gaussian_mlp_policy.GaussianMLPPolicy", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "name"}, {"api_name": "csv.DictWriter", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "name"}, {"api_name": "garage.misc.logger.set_snapshot_dir", "line_number": 85, "usage_type": "call"}, {"api_name": "garage.misc.logger", "line_number": 85, "usage_type": "name"}, {"api_name": "garage.misc.logger.set_snapshot_mode", "line_number": 86, "usage_type": "call"}, {"api_name": "garage.misc.logger", "line_number": 86, "usage_type": "name"}, {"api_name": "garage.misc.logger.set_snapshot_gap", "line_number": 87, "usage_type": "call"}, {"api_name": "garage.misc.logger", "line_number": 87, "usage_type": "name"}, {"api_name": "garage.misc.logger.log_parameters_lite", "line_number": 88, "usage_type": "call"}, {"api_name": "garage.misc.logger", "line_number": 88, "usage_type": "name"}, {"api_name": "garage.misc.logger.pop_prefix", "line_number": 91, "usage_type": "call"}, {"api_name": "garage.misc.logger", "line_number": 91, "usage_type": "name"}, {"api_name": "garage.misc.logger.remove_text_output", "line_number": 92, "usage_type": "call"}, {"api_name": "garage.misc.logger", "line_number": 92, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "name"}, {"api_name": "garage.misc.logger.remove_tabular_output", "line_number": 93, "usage_type": "call"}, {"api_name": "garage.misc.logger", "line_number": 93, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "name"}, {"api_name": "garage.misc.logger.add_text_output", "line_number": 94, "usage_type": "call"}, {"api_name": "garage.misc.logger", "line_number": 94, "usage_type": "name"}, {"api_name": "garage.misc.logger.add_tabular_output", "line_number": 95, "usage_type": "call"}, {"api_name": "garage.misc.logger", "line_number": 95, "usage_type": "name"}, {"api_name": "garage.misc.logger.push_prefix", "line_number": 96, "usage_type": "call"}, {"api_name": "garage.misc.logger", "line_number": 96, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 98, "usage_type": "attribute"}, {"api_name": "tensorflow.variables_initializer", "line_number": 101, "usage_type": "call"}, {"api_name": "garage.baselines.linear_feature_baseline.LinearFeatureBaseline", "line_number": 102, "usage_type": "call"}, {"api_name": "src.ast_toolbox.mcts.BoundedPriorityQueues.BoundedPriorityQueue", "line_number": 105, "usage_type": "call"}, {"api_name": "src.ast_toolbox.mcts.BoundedPriorityQueues", "line_number": 105, "usage_type": "name"}, {"api_name": "src.ast_toolbox.TRPO", "line_number": 106, "usage_type": "call"}]} {"seq_id": "143241960", "text": "\nimport wx\n\n#----------------------------------------------------------------------\n\nclass TestPanel(wx.Panel):\n def __init__(self, parent, log):\n self.log = log\n wx.Panel.__init__(self, parent, -1)\n\n dpc = wx.DatePickerCtrl(self, size=(120,-1),\n style=wx.DP_DROPDOWN | wx.DP_SHOWCENTURY)\n \n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(dpc, 0, wx.ALL, 50)\n\n self.SetSizer(sizer)\n \n\n#----------------------------------------------------------------------\n\ndef runTest(frame, nb, log):\n win = TestPanel(nb, log)\n return win\n\n#----------------------------------------------------------------------\n\n\n\noverview = \"\"\"\n
wx.DatePickerCtrl
\n\nThis control allows the user to select a date. Unlike\nwx.calendar.CalendarCtrl, which is a relatively big control,\nwx.DatePickerCtrl is implemented as a small window showing the\ncurrently selected date. The control can be edited using the keyboard,\nand can also display a popup window for more user-friendly date\nselection, depending on the styles used and the platform.\n\n\n\"\"\"\n\n\n\nif __name__ == '__main__':\n import sys,os\n import run\n run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])\n\n", "sub_path": "demo/DatePickerCtrl.py", "file_name": "DatePickerCtrl.py", "file_ext": "py", "file_size_in_byte": 1286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "wx.Panel", "line_number": 6, "usage_type": "attribute"}, {"api_name": "wx.Panel.__init__", "line_number": 9, "usage_type": "call"}, {"api_name": "wx.Panel", "line_number": 9, "usage_type": "attribute"}, {"api_name": "wx.DatePickerCtrl", "line_number": 11, "usage_type": "call"}, {"api_name": "wx.DP_DROPDOWN", "line_number": 12, "usage_type": "attribute"}, {"api_name": "wx.DP_SHOWCENTURY", "line_number": 12, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 14, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 14, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 15, "usage_type": "attribute"}, {"api_name": "run.main", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 48, "usage_type": "attribute"}]} {"seq_id": "649816876", "text": "import cv2\nimport numpy as np\n\nfilename = 'cup.jpg'\n\nprint('้กฏ็คบๅœ–็‰‡')\nimage = cv2.imread(filename, -1)\n\nshape = image.shape\nh = shape[0] #้ซ˜\nw = shape[1] #ๅฏฌ\nh, w, d = image.shape #d็‚บdimension d=3 ๅ…จๅฝฉ d=1 ็ฐ้šŽ\nprint(\"ๅฏฌ = \", w, \", ้ซ˜ = \", h, \", D = \", d)\n\nimage = cv2.resize(image, (403, 302))\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ngray = cv2.GaussianBlur(gray, (5, 5), 0)\ncircles = cv2.HoughCircles(\n gray, \n cv2.HOUGH_GRADIENT, # ๅตๆธฌๆ–นๆณ•็›ฎๅ‰ๅชๆ”ฏๆด้€™ๅ€‹ๅƒๆ•ธ\n 1, # 1ไปฃ่กจๅตๆธฌๅœ–่ˆ‡่ผธๅ…ฅๅœ–ๅคงๅฐไธ€่‡ด๏ผŒๅกซ1ๅณๅฏ\n 20, # ๅ„ๅœ“ๅฟƒ้–“็š„ๆœ€ๅฐ่ท้›ข๏ผŒ่จญๅคชๅฐๅฎนๆ˜“่ชคๅˆค๏ผŒๅคชๅคงๆœƒๅฐ‡ๆ•ธๅ€‹ๅœ“็•ถๆˆไธ€ๅ€‹\n None, # ๅ›บๅฎšๅกซ None\n 10, # cannyๆผ”็ฎ—ๆณ•็š„้ซ˜้–พๅ€ผ๏ผŒๆญคๅ€ผไธ€ๅŠ็‚บไฝŽ้–พๅ€ผ\n 75, # ่ถ…้Žๆญค้–พๅ€ผๆ‰ๆœƒ่ขซ็•ถไฝœๅœ“\n 3, # ๆœ€ๅฐๅœ“ๅŠๅพ‘\n 75 # ๆœ€ๅคงๅœ“ๅŠๅพ‘\n)\n\ncircles = circles.astype(int)\nif len(circles) > 0:\n out = image.copy()\n for x, y, r in circles[0]:\n # ็•ซๅœ“\n cv2.circle(out, (x, y), r, (0, 0, 255), 3)\n # ็•ซๅœ“ๅฟƒ\n cv2.circle(out, (x, y), 2, (0, 255, 0), 3)\n image = cv2.hconcat([image, out])\n\ncv2.namedWindow('image', cv2.WINDOW_NORMAL)\ncv2.imshow('image', image) #้กฏ็คบๅœ–็‰‡\n\nprint('ๅœจๆญค็ญ‰ๅพ…ไปปๆ„้ต็นผ็บŒ, ็นผ็บŒๅพŒๅˆช้™คๆœฌ่ฆ–็ช—')\ncv2.waitKey()\ncv2.destroyAllWindows()\n\n", "sub_path": "_4.python/__code/AIOT่ˆ‡OpenCVๅฏฆๆˆฐๆ‡‰็”จ(็ฌฌไบŒ็‰ˆ)/ch3-12/houghcircles.py", "file_name": "houghcircles.py", "file_ext": "py", "file_size_in_byte": 1360, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "cv2.imread", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 16, "usage_type": "call"}, {"api_name": 
"cv2.COLOR_BGR2GRAY", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.HoughCircles", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.HOUGH_GRADIENT", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.hconcat", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.WINDOW_NORMAL", "line_number": 40, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 45, "usage_type": "call"}]} {"seq_id": "377518695", "text": "\"\"\"Ciphers module.\nProvide Asymmetric and Symmetric cipher functions.\\\n\"\"\"\nfrom enum import IntEnum\n\nfrom Crypto import Random\nfrom Crypto.Cipher import PKCS1_v1_5, AES\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Random import random as rand\nfrom Crypto.Util import Counter\n\n\nclass AESBlockCipher:\n\n class Size(IntEnum):\n bit_128 = 1\n bit_192 = 2\n bit_256 = 3\n\n supported_mode = [AES.MODE_CBC, AES.MODE_CTR]\n\n def __init__(self, keysize=Size.bit_256, mode=AES.MODE_CBC, secret=None, salt=None):\n \"\"\"Generate AES key according to the keysize and secret.\n keysize -- int, AES keysize in byte (16, 24, 32).\n secret -- bytes, only used for Password-Based AES cipher.\n \"\"\"\n\n if mode not in self.supported_mode:\n raise None # TODO raise error\n\n self.aes = None\n self.MODE = mode\n\n if keysize not in list(map(int, self.Size)):\n raise Exception(\"Invalid key size.\")\n\n keysize = int((128 + 64 * (keysize - 1)) / 8)\n\n if not secret:\n \"\"\"Generate randome key\"\"\"\n self.key = Random.get_random_bytes(keysize)\n else: # Password-Based\n \"\"\"Construct AES key from user secret with argon2 hash function\"\"\"\n import argon2\n\n self.salt = Random.get_random_bytes(16) if salt is None else salt\n time_cost = 2\n memory_cost = 512\n parallelism = 2\n hash_len = keysize\n\n self.key = argon2.low_level.hash_secret_raw(secret, self.salt, time_cost,\n memory_cost, parallelism,\n hash_len, argon2.low_level.Type.I)\n\n def _setup_cipher(self, iv=None):\n # 16-byte, 128-bit initial vector/value for both CBC and CTR mode\n iv = Random.get_random_bytes(AES.block_size) if not iv else iv\n if self.MODE == AES.MODE_CBC:\n return iv, AES.new(self.key, AES.MODE_CBC, iv)\n if self.MODE == AES.MODE_CTR:\n ctr = Counter.new(128, initial_value=int.from_bytes(iv, byteorder=\"big\"))\n return iv, AES.new(self.key, AES.MODE_CTR, counter=ctr)\n\n @staticmethod\n def pad(b):\n return b + \\\n ((AES.block_size - len(b) % AES.block_size) *\n chr(AES.block_size - len(b) % AES.block_size)).encode()\n\n @staticmethod\n def unpad(b):\n return b[:-b[-1]]\n\n # Encrypt a chuck of bytes\n def encrypt(self, b, last=False):\n iv = bytes()\n if not self.aes: # first time, setup iv\n iv, self.aes = self._setup_cipher()\n\n if not last:\n return iv + self.aes.encrypt(b)\n else:\n # if last chunk, pad the chunk\n ret = self.aes.encrypt(self.pad(b))\n self.aes = None\n return iv + ret # return iv also since it could be the only chunk\n\n def decrypt(self, b, last=False):\n if not self.aes: # first time, retrieve iv\n iv = b[:AES.block_size] # both modes have the same iv size\n b = b[len(iv):]\n _, 
self.aes = self._setup_cipher(iv)\n\n        if not last:\n            return self.aes.decrypt(b)\n        else:\n            # if last chunk, unpad the chunk\n            dec = self.aes.decrypt(b)\n            return self.unpad(dec)\n\n\nclass RSACipher:\n    rsa = None\n    cipher = None\n\n    class Size(IntEnum):\n        bit_1024 = 1\n        bit_2048 = 2\n        bit_4096 = 3\n        bit_8192 = 4\n\n    def import_key(self, key, passphrase=None):\n        # if key is bytes, load it directly\n        if isinstance(key, bytes):\n            self.rsa = RSA.importKey(key, passphrase)\n        else: # else read it as file\n            with open(key, \"rb\") as f:\n                self.rsa = RSA.importKey(f.read(), passphrase)\n\n        self.cipher = PKCS1_v1_5.new(self.rsa)\n\n    def export_key(self, keyfile=None, format=\"PEM\", passphrase=None, public_only=False):\n        if self.rsa is None:\n            raise Exception(\"Missing RSA cipher...\")\n\n        # if has keyfile, write to key file\n        if keyfile:\n            with open(keyfile, \"wb\") as f:\n                if public_only:\n                    pubkey = self.rsa.publickey()\n                    f.write(pubkey.exportKey(format=format))\n                else:\n                    f.write(self.rsa.exportKey(format=format, passphrase=passphrase))\n        else: # else return bytes\n            if public_only:\n                pubkey = self.rsa.publickey()\n                return pubkey.exportKey(format=format)\n            else:\n                return self.rsa.exportKey(format=format, passphrase=passphrase)\n\n    def generate(self, keysize=Size.bit_4096, progress_func=None):\n        self.progress_func = progress_func\n\n        if keysize not in list(map(int, self.Size)):\n            raise Exception(\"Invalid key size.\")\n\n        keysize = {1: 1024, 2: 2048, 3: 4096, 4: 8192}[keysize]\n        self.rsa = RSA.generate(keysize, progress_func=self.gen_prog)\n\n        self.cipher = PKCS1_v1_5.new(self.rsa)\n\n        self.gen_prog(\"Finished, key size: \" + str(keysize) + \" bits\")\n\n    # progress function for generating key\n    def gen_prog(self, s):\n        d = {\"p,q\": \"Generating p and q primes...\",\n             \"u\": \"Generating public exponent...\",\n             \"d\": \"Generating private exponent...\"}\n        if self.progress_func:\n            if s in d.keys():\n                self.progress_func(d[s])\n            else:\n                self.progress_func(s)\n\n    # encrypt with rsa key, data size should not exceed n - 11\n    def encrypt(self, b):\n        if self.rsa is None:\n            raise Exception(\"Missing RSA cipher...\")\n\n        return self.cipher.encrypt(b)\n\n    def decrypt(self, b):\n        if self.rsa is None:\n            raise Exception(\"Missing RSA cipher...\")\n\n        return self.cipher.decrypt(b, Random.get_random_bytes(rand.randint(0, self.rsa.size()))) # random sentinel returned on decryption failure\n", "sub_path": "ciphers.py", "file_name": "ciphers.py", "file_ext": "py", "file_size_in_byte": 5984, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "enum.IntEnum", "line_number": 15, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 20, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES", "line_number": 20, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CTR", "line_number": 20, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 22, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES", "line_number": 22, "usage_type": "name"}, {"api_name": "Crypto.Random.get_random_bytes", "line_number": 41, "usage_type": "call"}, {"api_name": "Crypto.Random", "line_number": 41, "usage_type": "name"}, {"api_name": "Crypto.Random.get_random_bytes", "line_number": 46, "usage_type": "call"}, {"api_name": "Crypto.Random", "line_number": 46, "usage_type": "name"}, {"api_name": "argon2.low_level.hash_secret_raw", "line_number": 52, "usage_type": "call"}, {"api_name": "argon2.low_level", "line_number": 
52, "usage_type": "attribute"}, {"api_name": "argon2.low_level", "line_number": 54, "usage_type": "attribute"}, {"api_name": "Crypto.Random.get_random_bytes", "line_number": 58, "usage_type": "call"}, {"api_name": "Crypto.Random", "line_number": 58, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.block_size", "line_number": 58, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES", "line_number": 58, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 59, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES", "line_number": 59, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 60, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 60, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 60, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES.MODE_CTR", "line_number": 61, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES", "line_number": 61, "usage_type": "name"}, {"api_name": "Crypto.Util.Counter.new", "line_number": 62, "usage_type": "call"}, {"api_name": "Crypto.Util.Counter", "line_number": 62, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 63, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 63, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CTR", "line_number": 63, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES.block_size", "line_number": 68, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES", "line_number": 68, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.block_size", "line_number": 69, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES", "line_number": 69, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.block_size", "line_number": 91, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES", "line_number": 91, "usage_type": "name"}, {"api_name": "enum.IntEnum", "line_number": 107, "usage_type": "name"}, {"api_name": "Crypto.PublicKey.RSA.importKey", "line_number": 116, "usage_type": "call"}, {"api_name": "Crypto.PublicKey.RSA", "line_number": 116, "usage_type": "name"}, {"api_name": "Crypto.PublicKey.RSA.importKey", "line_number": 119, "usage_type": "call"}, {"api_name": "Crypto.PublicKey.RSA", "line_number": 119, "usage_type": "name"}, {"api_name": "Crypto.Cipher.PKCS1_v1_5.new", "line_number": 121, "usage_type": "call"}, {"api_name": "Crypto.Cipher.PKCS1_v1_5", "line_number": 121, "usage_type": "name"}, {"api_name": "Crypto.PublicKey.RSA.generate", "line_number": 149, "usage_type": "call"}, {"api_name": "Crypto.PublicKey.RSA", "line_number": 149, "usage_type": "name"}, {"api_name": "Crypto.Cipher.PKCS1_v1_5.new", "line_number": 151, "usage_type": "call"}, {"api_name": "Crypto.Cipher.PKCS1_v1_5", "line_number": 151, "usage_type": "name"}, {"api_name": "Crypto.Random.get_random_bytes", "line_number": 177, "usage_type": "call"}, {"api_name": "Crypto.Random", "line_number": 177, "usage_type": "name"}, {"api_name": "Crypto.Random.random.randint", "line_number": 177, "usage_type": "call"}, {"api_name": "Crypto.Random.random", "line_number": 177, "usage_type": "name"}]} {"seq_id": "482334241", "text": "from base.socialRecommender import SocialRecommender\nfrom scipy.sparse import *\nimport numpy as np\nfrom numpy import linalg as LA\nfrom joblib import Parallel, delayed\nfrom math import sqrt\nEPS = 1e-8\n# this model refers to the following paper:\n# #########---- Collaborative Filtering with Social Exposure: A Modular Approach to 
Social Recommendation ----#############\n# SEREC_boost\nclass SERec(SocialRecommender):\n def __init__(self,conf,trainingSet=None,testSet=None,relation=None,fold='[1]'):\n super(SERec, self).__init__(conf,trainingSet,testSet,relation,fold)\n\n def initModel(self):\n super(SERec, self).initModel()\n self.lam_theta = 1e-5\n self.lam_beta = 1e-5\n self.lam_y = 0.01\n self.init_mu = 0.01\n self.a = 1.0\n self.b = 99.0\n self.s= 2.2\n self.init_std = 0.5\n self.theta = self.init_std * \\\n np.random.randn(self.num_users, self.emb_size).astype(np.float32)\n self.beta = self.init_std * \\\n np.random.randn(self.num_items, self.emb_size).astype(np.float32)\n self.mu = self.init_mu * np.ones((self.num_users,self.num_items), dtype=np.float32)\n self.n_jobs=4\n self.batch_size=1000\n row,col,val = [],[],[]\n for user in self.data.trainSet_u:\n for item in self.data.trainSet_u[user]:\n u = self.data.user[user]\n i = self.data.item[item]\n row.append(u)\n col.append(i)\n val.append(1)\n\n self.X = csr_matrix((np.array(val),(np.array(row),np.array(col))),(self.num_users,self.num_items))\n row,col,val = [],[],[]\n for user in self.social.followees:\n for f in self.social.followees[user]:\n u = self.data.user[user]\n i = self.data.user[f]\n row.append(u)\n col.append(i)\n val.append(1)\n self.T = csr_matrix((np.array(val), (np.array(row), np.array(col))), (self.num_users, self.num_users))\n\n def trainModel(self):\n print('training...')\n self._update(self.X)\n\n def _update(self, X):\n '''Model training and evaluation on validation set'''\n n_users = X.shape[0]\n XT = X.T.tocsr() # pre-compute this\n self.vad_ndcg = -np.inf\n for i in range(self.maxEpoch):\n\n print('epoch #%d' % i)\n self._update_factors(X, XT)\n print(self.mu)\n self._update_expo(X, n_users)\n\n def _update_factors(self, X, XT):\n '''Update user and item collaborative factors with ALS'''\n self.theta = recompute_factors(self.beta, self.theta, X,\n self.lam_theta / self.lam_y,\n self.lam_y,\n self.mu,\n self.n_jobs,\n batch_size=self.batch_size)\n\n self.beta = recompute_factors(self.theta, self.beta, XT,\n self.lam_beta / self.lam_y,\n self.lam_y,\n self.mu,\n self.n_jobs,\n batch_size=self.batch_size)\n def _update_expo(self, X, n_users):\n '''Update exposure prior'''\n print('\\tUpdating exposure prior...')\n start_idx = list(range(0, n_users, self.batch_size))\n end_idx = start_idx[1:] + [n_users]\n A_sum = np.zeros(self.num_items)\n for lo, hi in zip(start_idx, end_idx):\n A_sum += a_row_batch(X[lo:hi], self.theta[lo:hi], self.beta,\n self.lam_y, self.mu[lo:hi]).sum(axis=0)\n A_sum=np.tile(A_sum,[self.num_users,1])\n S_sum = self.T.dot(A_sum)\n self.mu = (self.a + A_sum +(self.s-1)*S_sum- 1) / (self.a + self.b + (self.s-1)*S_sum+n_users - 2)\n\n def predictForRanking(self,u):\n 'invoked to rank all the items for the user'\n if self.data.containsUser(u):\n u = self.data.getUserId(u)\n return self.beta.dot(self.theta[u])\n else:\n return [self.data.globalMean] * self.num_items\n\ndef get_row(Y, i):\n '''Given a scipy.sparse.csr_matrix Y, get the values and indices of the\n non-zero values in i_th row'''\n lo, hi = Y.indptr[i], Y.indptr[i + 1]\n return Y.data[lo:hi], Y.indices[lo:hi]\n\ndef a_row_batch(Y_batch, theta_batch, beta, lam_y, mu):\n '''Compute the posterior of exposure latent variables A by batch'''\n pEX = sqrt(lam_y / 2 / np.pi) * \\\n np.exp(-lam_y * theta_batch.dot(beta.T) ** 2 / 2)\n\n A = (pEX + EPS) / (pEX + EPS + (1 - mu) / mu)\n A[Y_batch.nonzero()] = 1.\n return A\n\ndef _solve(k, A_k, X, Y, f, lam, lam_y, mu):\n 
'''Update one single factor'''\n s_u, i_u = get_row(Y, k)\n a = np.dot(s_u * A_k[i_u], X[i_u])\n B = X.T.dot(A_k[:, np.newaxis] * X) + lam * np.eye(f)\n return LA.solve(B, a)\n\ndef _solve_batch(lo, hi, X, X_old_batch, Y, m, f, lam, lam_y, mu):\n '''Update factors by batch, will eventually call _solve() on each factor to\n keep the parallel process busy'''\n assert X_old_batch.shape[0] == hi - lo\n\n if mu.shape[1] == X.shape[0]: # update users\n A_batch = a_row_batch(Y[lo:hi], X_old_batch, X, lam_y, mu[lo:hi])\n else: # update items\n A_batch = a_row_batch(Y[lo:hi], X_old_batch, X, lam_y, mu.T[lo:hi])\n\n X_batch = np.empty_like(X_old_batch, dtype=X_old_batch.dtype)\n for ib, k in enumerate(range(lo, hi)):\n X_batch[ib] = _solve(k, A_batch[ib], X, Y, f, lam, lam_y, mu)\n return X_batch\n\ndef recompute_factors(X, X_old, Y, lam, lam_y, mu, n_jobs, batch_size=1000):\n '''Regress X to Y with exposure matrix (computed on-the-fly with X_old) and\n ridge term lam by embarrassingly parallelization. All the comments below\n are in the view of computing user factors'''\n m, n = Y.shape # m = number of users, n = number of items\n assert X.shape[0] == n\n assert X_old.shape[0] == m\n f = X.shape[1] # f = number of factors\n\n start_idx = list(range(0, m, batch_size))\n end_idx = start_idx[1:] + [m]\n res = Parallel(n_jobs=n_jobs)(delayed(_solve_batch)(\n lo, hi, X, X_old[lo:hi], Y, m, f, lam, lam_y, mu)\n for lo, hi in zip(start_idx, end_idx))\n X_new = np.vstack(res)\n return X_new\n\n", "sub_path": "model/ranking/SERec.py", "file_name": "SERec.py", "file_ext": "py", "file_size_in_byte": 6423, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "base.socialRecommender.SocialRecommender", "line_number": 11, "usage_type": "name"}, {"api_name": "numpy.random.randn", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 92, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.linalg.solve", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 124, "usage_type": "name"}, {"api_name": "numpy.empty_like", "line_number": 136, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 152, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 
152, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 155, "usage_type": "call"}]} {"seq_id": "35122361", "text": "import scrapy\nimport subprocess\nimport json\nimport os\n\n\ndef installPackage(package):\n try:\n return __import__(str(package))\n except ImportError:\n subprocess.call([\"pip3\", \"install\", \"scrapy\"])\n\n\n# Make a dicrectory to contain JSON file and HTML file\nclass makeDir():\n def JSON_dir(self):\n JSON_path = os.path.join(os.getcwd(), \"JSON Image file/\")\n\n if not os.path.exists(JSON_path):\n os.mkdir(JSON_path)\n\n return JSON_path\n\n def HTML_dir(self):\n HTML_path = JSON_path = os.path.join(os.getcwd(), \"HTML File/\")\n\n if not os.path.exists(HTML_path):\n os.mkdir(HTML_path)\n\n return HTML_path\n\n\ndir = makeDir()\nJSON_directory = dir.JSON_dir()\nHTML_directory = dir.HTML_dir()\n\n\n# For dantri.com\nclass covid_dantri(scrapy.Spider):\n name = \"covid_dantri\"\n\n def start_requests(self):\n urls = [\n \"https://dantri.com.vn/suc-khoe/dai-dich-covid-19.htm\",\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n page = response.url.split(\"/\")[-1].split(\".\")[0].split(\".\")[0]\n filename = f'{HTML_directory}dantri-{page}.html'\n with open(filename, 'wb') as f:\n f.write(response.body)\n self.log(f'Saved file {filename}')\n\n # Writing source and caption of image into JSON file\n dantri_covid = {'img': []}\n\n for image in response.css(\"img\"):\n dantri_covid['img'].append({\n \"source\": image.attrib[\"src\"],\n \"caption\": image.attrib[\"alt\"],\n })\n\n with open(f\"{JSON_directory}/image_dantri.json\", \"w\") as image_file:\n json.dump(dantri_covid, image_file)\n\n\n# For tuoitre.net\nclass covid_tuoitre(scrapy.Spider):\n name = \"covid_tuoitre\"\n\n def start_requests(self):\n urls = [\n \"https://tuoitre.vn/phong-chong-covid-19-e583.htm\",\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n page = response.url.split(\"/\")[-1].split(\".\")[0].split(\".\")[0]\n filename = f'{HTML_directory}tuoitre-{page}.html'\n with open(filename, 'wb') as f:\n f.write(response.body)\n self.log(f'Saved file {filename}')\n\n tuoitre_covid = {'img': []}\n\n source = response.css('img::attr(src)').extract()\n caption = response.css('img::attr(alt)').extract()\n\n for i in range(0, len(caption)):\n tuoitre_covid['img'].append({\n \"source\": source[i],\n \"caption\": caption[i],\n })\n\n with open(f\"{JSON_directory}image_tuoitre.json\", \"w\") as image_file:\n json.dump(tuoitre_covid, image_file)\n\n\n# For vnexpress.net\nclass covid_vnexpress(scrapy.Spider):\n name = \"covid_vnexpress\"\n\n def start_requests(self):\n urls = [\n \"https://vnexpress.net/covid-19/tin-tuc\",\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n page = response.url.split(\"/\")[-2]\n filename = f'{HTML_directory}vnexpress-{page}.html'\n with open(filename, 'wb') as f:\n f.write(response.body)\n self.log(f'Saved file {filename}')\n\n # Writing source and caption of image into JSON file\n vnexpress_covid = {'img': []}\n\n for image in response.css(\"img\"):\n vnexpress_covid['img'].append({\n \"source\": image.attrib[\"src\"],\n \"caption\": image.attrib[\"alt\"],\n })\n\n with open(f\"{JSON_directory}image_vnexpress.json\", \"w\") as image_file:\n json.dump(vnexpress_covid, image_file)\n\n\ndef main():\n # Install Scrapy if not have\n installPackage(scrapy)\n\n # Crawl image and caption about Covid19 
News\n # From dantri.com\n subprocess.call([\"scrapy\", \"crawl\", \"covid_dantri\"])\n\n # From tuoitre.net\n subprocess.call([\"scrapy\", \"crawl\", \"covid_tuoitre\"])\n\n # From vnexpress.net\n subprocess.call([\"scrapy\", \"crawl\", \"covid_vnexpress\"])\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "covid.py", "file_name": "covid.py", "file_ext": "py", "file_size_in_byte": 4157, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "subprocess.call", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 28, "usage_type": "call"}, {"api_name": "scrapy.Spider", "line_number": 39, "usage_type": "attribute"}, {"api_name": "scrapy.Request", "line_number": 47, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 66, "usage_type": "call"}, {"api_name": "scrapy.Spider", "line_number": 70, "usage_type": "attribute"}, {"api_name": "scrapy.Request", "line_number": 78, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 99, "usage_type": "call"}, {"api_name": "scrapy.Spider", "line_number": 103, "usage_type": "attribute"}, {"api_name": "scrapy.Request", "line_number": 111, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 130, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 139, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 142, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 145, "usage_type": "call"}]} {"seq_id": "386290743", "text": "import collections\nimport heapq\nfrom typing import List\nimport time\n\nclass Solution:\n def findKthLargest(self, nums: List[int], k: int) -> int:\n nums.sort(reverse=True)\n maxK = 0\n for i in range(0, k):\n maxK = nums[i]\n return maxK\n\n\ndef main():\n sol = Solution()\n inputs = [[3,2,1,5,6,4], [3,2,3,1,2,4,5,5,6]]\n ks = [2, 4]\n outputs = [5, 4]\n\n for i in range(len(inputs)):\n start = time.time()\n res = sol.findKthLargest(inputs[i], ks[i])\n end = time.time()\n if outputs[i] == res:\n print(\"true : \", end-start, \" sec\")\n else:\n print(\"false : \", end-start, \" sec\")\n\n\nif __name__ == \"__main__\":\n main()\n\n", "sub_path": "leetcode_ex/algorithm/2_Medium/0215_KthLargestElementInArray/0215_kth_largest_element_in_array_using_reversesort.py", "file_name": "0215_kth_largest_element_in_array_using_reversesort.py", "file_ext": "py", "file_size_in_byte": 716, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "typing.List", "line_number": 7, "usage_type": "name"}, {"api_name": "time.time", "line_number": 22, "usage_type": "call"}, {"api_name": "time.time", "line_number": 24, "usage_type": "call"}]} 
{"seq_id": "525410334", "text": "import os\nimport shutil\nimport copy\nimport pickle\nimport math\nimport skimage\nimport sklearn\nimport scipy\nimport numpy as np\n\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Ellipse\n\nimport skimage.io as io\nfrom skimage.transform import rescale\nfrom skimage.feature import blob_log\nfrom skimage.morphology import closing, square\nfrom skimage.filters import threshold_otsu\nfrom skimage.measure import label\n\nimport skan\nfrom skan import skeleton_to_csgraph\n\nfrom sklearn import preprocessing\nfrom sklearn import linear_model\nfrom sklearn import decomposition\nfrom sklearn.cluster import KMeans\n\n\n\nclass Cell:\n \"\"\"\n Extract the individual cell by thresholding and removing background noise. \n \"\"\"\n\n def __init__(self, cell_image, image_type, reference_image=None):\n \"\"\"\n Args:\n\n cell_image: RGB cell image\n image_type: 'confocal' or 'DAB'\n reference_image: all images can be standardised to the exposure level of this example cell image\n\n \"\"\"\n\n self.cell_image = cell_image\n self.image_type = image_type\n self.gray_cell_image = skimage.color.rgb2gray(self.cell_image)\n self.inverted_gray_cell_image = skimage.util.invert(self.gray_cell_image)\n self.reference_image = reference_image\n self.threshold_image = self.threshold_image()\n self.inverted_threshold_image = skimage.util.invert(self.threshold_image)\n self.cleaned_image = self.remove_small_object_noise()\n self.cleaned_image_filled_holes = self.fill_holes()\n\n\n def get_blobs(self):\n # Extracts circular blobs in cell image for finding soma later\n\n if self.image_type == \"DAB\":\n blobs_log = blob_log(self.inverted_gray_cell_image, min_sigma=6, max_sigma=20, num_sigma=10, threshold=0.1, overlap=0.5)\n elif self.image_type == \"confocal\":\n blobs_log = blob_log(self.cell_image, min_sigma=3, max_sigma=20, num_sigma=10, threshold=0.1, overlap=0.5)\n\n def eliminate_border_blobs(blobs_log):\n # find the blobs too close to border so as to eliminate them\n blobs_dict = defaultdict()\n for i, blob in enumerate(blobs_log):\n blobs_dict[i] = np.take(blob, [0,1,3])\n y, x, r = blobs_dict[i]\n image_border_x, image_border_y = self.cell_image.shape[1]/5, self.cell_image.shape[0]/5\n if x < image_border_x or x > 4*image_border_x or y < image_border_y or y > 4*image_border_y:\n blobs_dict.pop(i)\n blobs_log=[]\n for key, blobs in blobs_dict.items():\n blobs_log.append(blobs)\n return blobs_log\n\n blobs_log = eliminate_border_blobs(blobs_log)\n\n if len(blobs_log)<1:\n # if none of the blobs remain after border blob elimination, try blob_log with less stringent parameters \n blobs_log = blob_log(self.cell_image, min_sigma=2, max_sigma=20, num_sigma=10, threshold=0.1, overlap=0.5)\n blobs_log = eliminate_border_blobs(blobs_log)\n\n\n return blobs_log\n\n\n def centre_of_mass(self, blobs):\n # finds centre of mass of the multiple blobs detected\n\n # find the blob with highest intensity value\n ixs = np.indices(self.gray_cell_image.shape)\n\n blob_intensities=[]\n blob_centres=[]\n blob_radiuses=[]\n\n for blob in blobs:\n y, x, r = blob\n # Define an array of shape `[2, 1, 1]`, containing the center of the blob\n blob_center = np.array([y, x])[:, np.newaxis, np.newaxis]\n # Using the formula for a circle, `x**2 + y**2 < r**2`, generate a mask for this blob.\n mask = ((ixs - blob_center)**2).sum(axis=0) < r**2\n # Calculate the average intensity of pixels under the mask\n blob_avg_est = self.gray_cell_image[mask].mean()\n 
blob_intensities.append(blob_avg_est)\n blob_centres.append((y, x))\n blob_radiuses.append(r)\n\n if self.image_type == \"DAB\":\n max_intensity = blob_centres[np.argmin(blob_intensities)]\n\n return max_intensity\n\n elif self.image_type == \"confocal\":\n max_radius = blob_centres[np.argmax(blob_radiuses)]\n max_intensity = blob_centres[np.argmax(blob_intensities)]\n if len(blob_radiuses) > len(set(blob_radiuses)):\n return max_intensity\n else:\n return max_radius\n\n\n def get_soma(self):\n # calculate pixel position to be attribute as soma\n\n soma_blobs = self.get_blobs()\n\n if len(soma_blobs)==1:\n soma = soma_blobs[0][:2]\n if len(soma_blobs)>1:\n soma = self.centre_of_mass(soma_blobs)\n\n return soma\n\n def threshold_image(self):\n if self.reference_image is not None:\n self.gray_reference_image = skimage.color.rgb2gray(self.reference_image)\n self.gray_cell_image = skimage.transform.match_histograms(self.gray_cell_image, self.gray_reference_image)\n\n # Contrast stretching\n p2, p98 = np.percentile(self.gray_cell_image, (2, 98))\n img_rescale = skimage.exposure.rescale_intensity(self.gray_cell_image, in_range=(p2, p98))\n\n thresholded_cell = img_rescale > threshold_otsu(img_rescale)\n\n if self.image_type == \"DAB\":\n return thresholded_cell\n elif self.image_type == \"confocal\":\n return skimage.util.invert(thresholded_cell)\n\n\n def label_objects(self):\n bw = closing(self.inverted_threshold_image, square(1))\n # label image regions\n labelled_image, no_of_objects = skimage.measure.label(bw, return_num=True)\n \n return labelled_image, no_of_objects \n\n\n def remove_small_object_noise(self):\n labelled_image, no_of_objects = self.label_objects()\n object_areas = []\n for object_label in range(1, no_of_objects+1):\n # for object_label in range(no_of_objects):\n object_areas.append(len(np.where(labelled_image==[object_label])[0]))\n \n largest_object_label = np.argmax(object_areas)+1\n astrocyte_image = np.where(labelled_image==[largest_object_label], 1, 0)\n \n return astrocyte_image\n\n\n def fill_holes(self):\n return scipy.ndimage.binary_fill_holes(self.cleaned_image).astype(int)\n\n\n def surface_area(self):\n return np.sum(self.cleaned_image)\n\n\n\nclass Skeleton:\n \"\"\"\n Skeletonize the thresholded image and extract relevant features from the 2D skeleton.\n \"\"\"\n\n def __init__(self, cell_image, image_type):\n \"\"\"\n Args:\n\n cell_image: RGB cell image\n image_type: 'confocal' or 'DAB'\n\n \"\"\"\n\n self.cell_image = cell_image\n self.astrocyte = Cell(cell_image, image_type)\n self.cleaned_image = self.astrocyte.cleaned_image_filled_holes\n self.soma = self.astrocyte.get_soma()\n self.cell_skeleton = self.skeletonization()\n self.soma_on_skeleton = self.get_soma_on_skeleton()\n self.padded_skeleton = self.pad_skeleton()\n self.classify_branching_structure()\n\n\n def distance(self, P1, P2):\n # find eucledian distance between two pixel positions\n return ((P1[0] - P2[0])**2 + (P1[1] - P2[1])**2) ** 0.5\n\n\n def skeletonization(self):\n # perform skeletonization\n return skimage.morphology.skeletonize(self.cleaned_image) \n\n\n def pad_skeleton(self):\n\n # get all the pixel indices representing skeleton\n skeleton_indices = np.nonzero(self.cell_skeleton)\n\n # get corner points enclosing skeleton\n x_min, x_max = min(skeleton_indices[1]), max(skeleton_indices[1])\n y_min, y_max = min(skeleton_indices[0]), max(skeleton_indices[0])\n self.bounded_skeleton = self.cell_skeleton[y_min:y_max, x_min:x_max]\n\n pad_width = max(self.bounded_skeleton.shape)//2\n 
self.bounded_skeleton_boundary = [x_min, x_max, y_min, y_max]\n\n # get updated soma position on bounded and padded skeleton\n self.soma_on_bounded_skeleton = self.soma_on_skeleton[0]-y_min, self.soma_on_skeleton[1]-x_min\n self.soma_on_padded_skeleton = self.soma_on_skeleton[0]-y_min+pad_width, self.soma_on_skeleton[1]-x_min+pad_width\n\n return skimage.util.pad(self.bounded_skeleton, pad_width=pad_width, mode='constant')\n\n\n def get_soma_on_skeleton(self):\n skeleton_pixel_coordinates = [(i, j) for (i, j), val in np.ndenumerate(self.cell_skeleton) if val!=0]\n soma_on_skeleton = min(skeleton_pixel_coordinates, key=lambda x: self.distance(self.soma, x))\n\n return soma_on_skeleton\n\n\n def total_length(self):\n return np.sum(self.cell_skeleton)\n\n\n def avg_process_thickness(self):\n return round((self.astrocyte.surface_area()/self.total_length()), 1)\n\n\n def convex_hull(self, plot=False):\n convex_hull = skimage.morphology.convex_hull_image(self.cell_skeleton)\n if plot==True:\n fig, ax = plt.subplots()\n ax.set_axis_off()\n ax.imshow(convex_hull)\n \n return np.sum(convex_hull)\n\n\n def get_no_of_forks(self, plot=False):\n\n # get the degree for every cell pixel (no. of neighbouring pixels) \n pixel_graph, coordinates, degrees = skeleton_to_csgraph(self.cell_skeleton)\n # array of all pixel locations with degree more than 2\n fork_image = np.where(degrees > [2], 1, 0)\n s = scipy.ndimage.generate_binary_structure(2,2)\n labeled_array, num_forks = scipy.ndimage.label(fork_image, structure=s)\n \n if plot==True:\n fork_indices = np.where(degrees > [2])\n fork_coordinates = zip(fork_indices[0], fork_indices[1])\n\n fig, ax = plt.subplots(figsize=(4, 4))\n ax.set_title('path')\n ax.imshow(self.cell_skeleton, interpolation='nearest')\n\n for i in fork_coordinates:\n c = plt.Circle((i[1], i[0]), 0.6, color='green')\n ax.add_patch(c)\n\n ax.set_axis_off()\n plt.tight_layout()\n plt.show()\n\n return num_forks\n\n\n def eliminate_loops(self, branch_statistics, paths_list):\n loop_indexes=[]\n loop_branch_end_points=[]\n \n # set that keeps track of what elements have been added\n seen = set()\n # eliminate loops from branch statistics\n for branch_no, branch in enumerate(branch_statistics):\n # If element not in seen, add it to both\n current = (branch[0], branch[1])\n if current not in seen:\n seen.add(current)\n elif current in seen:\n # for deleting the loop index from branch statistics\n loop_indexes.append(branch_no)\n # for deleting the paths from paths list by recognizing loop end points\n loop_branch_end_points.append((int(branch[0]), int(branch[1])))\n\n new_branch_statistics = np.delete(branch_statistics, loop_indexes, axis=0)\n\n # eliminate loops from paths list\n path_indexes=[]\n for loop_end_points in loop_branch_end_points:\n for path_no, path in enumerate(paths_list):\n if loop_end_points[0]==path[0] and loop_end_points[1]==path[-1] or loop_end_points[0]==path[-1] and loop_end_points[1]==path[0]:\n path_indexes.append(path_no)\n break\n\n new_paths_list = np.delete(np.array(paths_list), path_indexes, axis=0)\n \n return new_branch_statistics, new_paths_list\n\n\n def branch_structure(self, junctions, branch_statistics, paths_list):\n next_set_junctions = []\n next_set_branches = []\n terminal_branches=[]\n\n for junction in junctions:\n branches_travelled = []\n for branch_no, branch in enumerate(branch_statistics):\n if branch[0]==junction:\n if branch[3]==2:\n next_set_junctions.append(branch[1])\n for path in paths_list:\n if branch[0]==path[0] and branch[1]==path[-1] 
or branch[0]==path[-1] and branch[1]==path[0]:\n next_set_branches.append(path)\n branches_travelled.append(branch_no)\n if branch[3]==1:\n for path in paths_list:\n if branch[0]==path[0] and branch[1]==path[-1] or branch[0]==path[-1] and branch[1]==path[0]:\n terminal_branches.append(path)\n next_set_branches.append(path)\n branches_travelled.append(branch_no)\n elif branch[1]==junction:\n if branch[3]==2:\n next_set_junctions.append(branch[0])\n for path in paths_list:\n if branch[0]==path[0] and branch[1]==path[-1] or branch[0]==path[-1] and branch[1]==path[0]:\n next_set_branches.append(path)\n branches_travelled.append(branch_no)\n if branch[3]==1:\n for path in paths_list:\n if branch[0]==path[0] and branch[1]==path[-1] or branch[0]==path[-1] and branch[1]==path[0]:\n terminal_branches.append(path)\n next_set_branches.append(path)\n branches_travelled.append(branch_no)\n branch_statistics = np.delete(branch_statistics, branches_travelled, axis=0)\n \n return next_set_junctions, next_set_branches, terminal_branches, branch_statistics\n\n\n def classify_branching_structure(self, plot=False):\n\n def get_soma_node():\n near = []\n for i in range(skan.csr.Skeleton(self.cell_skeleton).n_paths):\n path_coords = skan.csr.Skeleton(self.cell_skeleton).path_coordinates(i)\n nearest = min(path_coords, key=lambda x: self.distance(self.soma_on_skeleton, x))\n near.append(nearest)\n\n soma_on_path = min(near, key=lambda x: self.distance(self.soma_on_skeleton, x))\n\n for i,j in enumerate(skan.csr.Skeleton(self.cell_skeleton).coordinates):\n if all(soma_on_path==j):\n soma_node = [i]\n break\n\n return soma_node \n\n def get_soma_branches(soma_node, paths_list): \n soma_branches=[]\n for path in paths_list:\n # print(path)\n if soma_node in path:\n soma_branches.append(path)\n return soma_branches\n\n\n pixel_graph, coordinates, degrees = skeleton_to_csgraph(self.cell_skeleton)\n branch_statistics = skan.csr.branch_statistics(pixel_graph)\n paths_list = skan.csr.Skeleton(self.cell_skeleton).paths_list()\n \n terminal_branches = []\n branching_structure_array = []\n # get branches containing soma node\n\n soma_node = get_soma_node()\n soma_branches = get_soma_branches(soma_node, paths_list)\n if len(soma_branches)>2:\n junctions = soma_node\n delete_soma_branch=False\n else:\n # collect first level/primary branches\n junctions = [soma_branches[0][0], soma_branches[0][-1]]\n delete_soma_branch=True\n \n # eliminate loops in branches and path lists\n branch_statistics, paths_list = self.eliminate_loops(branch_statistics, paths_list)\n \n while True:\n junctions, branches, terminal_branch, branch_statistics = self.branch_structure(junctions, branch_statistics, paths_list)\n branching_structure_array.append(branches)\n terminal_branches.extend(terminal_branch)\n if len(junctions)==0:\n break\n\n if delete_soma_branch==True:\n branching_structure_array[0].remove(soma_branches[0])\n\n \n if plot==True:\n # store same level branch nodes in single array \n color_branches_coords=[]\n for branch_level in branching_structure_array:\n single_branch_level=[]\n for path in branch_level:\n path_coords=[]\n for node in path:\n path_coords.append(coordinates[node])\n single_branch_level.extend(path_coords)\n color_branches_coords.append(single_branch_level)\n\n fig, ax = plt.subplots(figsize=(4, 4))\n ax.set_title('path')\n ax.imshow(self.cell_skeleton, interpolation='nearest')\n\n color_codes = ['red', 'blue', 'magenta', 'green', 'cyan']\n for j, color_branch in enumerate(color_branches_coords):\n if j>4:\n j=4\n 
for k in color_branch:\n c = plt.Circle((k[1], k[0]), 0.5, color=color_codes[j])\n ax.add_patch(c) \n \n ax.set_axis_off()\n plt.tight_layout()\n plt.show()\n \n self.branching_structure_array = branching_structure_array\n self.terminal_branches = terminal_branches\n\n\n def get_primary_branches(self):\n primary_branches = self.branching_structure_array[0]\n no_of_primary_branches = len(primary_branches)\n avg_length_of_primary_branches = 0 if no_of_primary_branches == 0 else sum(map(len, primary_branches))/float(len(primary_branches))\n \n return primary_branches, no_of_primary_branches, round(avg_length_of_primary_branches, 1)\n\n\n def get_secondary_branches(self):\n try:\n secondary_branches = self.branching_structure_array[1]\n except IndexError:\n secondary_branches=[]\n no_of_secondary_branches = len(secondary_branches)\n avg_length_of_secondary_branches = 0 if no_of_secondary_branches == 0 else sum(map(len, secondary_branches))/float(len(secondary_branches))\n \n return secondary_branches, no_of_secondary_branches, round(avg_length_of_secondary_branches, 1)\n\n\n def get_tertiary_branches(self):\n try:\n tertiary_branches = self.branching_structure_array[2]\n except IndexError:\n tertiary_branches=[]\n no_of_tertiary_branches = len(tertiary_branches)\n avg_length_of_tertiary_branches = 0 if no_of_tertiary_branches == 0 else sum(map(len, tertiary_branches))/float(len(tertiary_branches))\n \n return tertiary_branches, no_of_tertiary_branches, round(avg_length_of_tertiary_branches, 1)\n\n\n def get_quatenary_branches(self):\n try:\n quatenary_branches = self.branching_structure_array[3:]\n except IndexError:\n quatenary_branches=[]\n quatenary_branches = [branch for branch_level in quatenary_branches for branch in branch_level]\n no_of_quatenary_branches = len(quatenary_branches)\n avg_length_of_quatenary_branches = 0 if no_of_quatenary_branches == 0 else sum(map(len, quatenary_branches))/float(len(quatenary_branches))\n \n return quatenary_branches, no_of_quatenary_branches, round(avg_length_of_quatenary_branches, 1)\n\n\n def get_terminal_branches(self):\n terminal_branches = self.terminal_branches\n no_of_terminal_branches = len(terminal_branches)\n avg_length_of_terminal_branches = 0 if no_of_terminal_branches == 0 else sum(map(len, terminal_branches))/float(len(terminal_branches))\n \n return terminal_branches, no_of_terminal_branches, round(avg_length_of_terminal_branches, 1)\n\n\n\nclass Sholl:\n \"\"\"\n Extract radius and no. of intersections for sholl analyses and other relevant features from the resulting sholl plot. 
\n \"\"\"\n\n def __init__(self, cell_image, image_type, shell_step_size, polynomial_degree=3):\n\n \"\"\"\n Args:\n \n cell_image: RGB cell image\n image_type: 'confocal' or 'DAB'\n shell_step_size: pixel difference between concentric circles for sholl analysis\n polynomial_degree (scalar): degree of polynomial for fitting regression model on sholl values\n\n \"\"\"\n\n self.shell_step_size = shell_step_size\n self.polynomial_degree = polynomial_degree\n self.skeleton = Skeleton(cell_image, image_type)\n self.bounded_skeleton = self.skeleton.bounded_skeleton\n self.soma_on_bounded_skeleton = self.skeleton.soma_on_bounded_skeleton\n self.padded_skeleton = self.skeleton.padded_skeleton\n self.soma_on_padded_skeleton = self.skeleton.soma_on_padded_skeleton\n self.distances_from_soma = self.sholl_results()[0]\n self.no_of_intersections = self.sholl_results()[1]\n self.polynomial_model = self.polynomial_fit()\n self.determination_ratio()\n\n \n def concentric_coords_and_values(self):\n # concentric_coordinates: {radius values: [pixel coordinates on that radius]}\n # no_of_intersections: {radius values: no_of_intersection values}\n\n largest_radius = int(1.3*(np.max([self.soma_on_bounded_skeleton[1], abs(self.soma_on_bounded_skeleton[1]-self.bounded_skeleton.shape[1]), \n self.soma_on_bounded_skeleton[0], abs(self.soma_on_bounded_skeleton[0]-self.bounded_skeleton.shape[0])])))\n concentric_coordinates = defaultdict(list) # {100: [(10,10), ..] , 400: [(20,20), ..]}\n concentric_coordinates_intensities = defaultdict(list)\n concentric_radiuses = [radius for radius in range(self.shell_step_size, largest_radius, self.shell_step_size)]\n\n for (x, y), value in np.ndenumerate(self.padded_skeleton):\n for radius in concentric_radiuses:\n lhs = (x - self.soma_on_padded_skeleton[0])**2 + (y - self.soma_on_padded_skeleton[1])**2\n if abs((math.sqrt(lhs)-radius)) < 0.9:\n concentric_coordinates[radius].append((x, y))\n concentric_coordinates_intensities[radius].append(value)\n\n # array with intersection values corresponding to radii\n no_of_intersections = defaultdict()\n for radius, val in concentric_coordinates_intensities.items():\n intersec_indicies=[]\n indexes = [i for i, x in enumerate(val) if x]\n for index in indexes:\n intersec_indicies.append(concentric_coordinates[radius][index])\n img = np.zeros(self.padded_skeleton.shape)\n intersections = []\n for i, j in enumerate(intersec_indicies):\n img[j] = 1\n label_image = label(img)\n no_of_intersections[radius] = np.amax(label_image)\n\n return concentric_coordinates, no_of_intersections\n\n\n def sholl_results(self, plot=False):\n # return sholl radiuses and corresponding intersection values\n xs, ys = [], []\n concentric_coordinates, no_of_intersections = self.concentric_coords_and_values()\n for rad, val in no_of_intersections.items():\n xs.append(rad)\n ys.append(val)\n order = np.argsort(xs)\n\n if plot==True:\n astrocyte_skeleton_copy = copy.deepcopy(self.padded_skeleton)\n for radius, coordinates in concentric_coordinates.items():\n for coord in coordinates:\n cell_image_with_circles = astrocyte_skeleton_copy\n cell_image_with_circles[coord[0],coord[1]]=1\n\n # plot circles on skeleton\n fig, ax = plt.subplots(figsize=(10, 6))\n ax.imshow(cell_image_with_circles)\n # overlay soma on skeleton\n y, x = self.soma_on_padded_skeleton\n c = plt.Circle((x, y), 1, color='red')\n ax.add_patch(c)\n ax.set_axis_off()\n plt.tight_layout()\n plt.show()\n\n # plot sholl graph showing radius vs. 
no_of_intersections \n plt.plot(self.distances_from_soma, self.no_of_intersections)\n plt.xlabel(\"Distance from centre\")\n plt.ylabel(\"No. of intersections\") \n plt.show()\n\n return np.array(xs)[order], np.array(ys)[order]\n\n\n def polynomial_fit(self, plot=False):\n # Linear polynomial regression to describe the relationship between intersections vs. distance\n\n # till last non-zero value\n last_intersection_index = np.max(np.nonzero(self.no_of_intersections))\n self.non_zero_no_of_intersections = self.no_of_intersections[:last_intersection_index]\n self.non_zero_distances_from_soma = self.distances_from_soma[:last_intersection_index]\n\n y_data = self.non_zero_no_of_intersections\n reshaped_x = self.non_zero_distances_from_soma.reshape((-1, 1))\n\n x_ = preprocessing.PolynomialFeatures(degree=self.polynomial_degree, include_bias=False).fit_transform(reshaped_x)\n # create a linear regression model\n polynomial_model = linear_model.LinearRegression().fit(x_, y_data)\n\n self.polynomial_predicted_no_of_intersections = polynomial_model.predict(x_)\n\n if plot==True:\n # predict y from the data\n x_new = self.non_zero_distances_from_soma\n y_new = polynomial_model.predict(x_)\n # plot the results\n plt.figure(figsize=(4, 3))\n ax = plt.axes()\n ax.scatter(reshaped_x, y_data)\n ax.plot(x_new, y_new)\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.axis('tight')\n plt.show()\n\n return polynomial_model\n\n\n def enclosing_radius(self):\n # index of last non-zero value in the array containing radii\n return self.non_zero_distances_from_soma[len(self.non_zero_no_of_intersections) - (self.non_zero_no_of_intersections!=0)[::-1].argmax() - 1]\n \n\n def critical_radius(self):\n # radii_array[index of the max value in the array for no_of_intersections (polynomial plot)]\n return self.non_zero_distances_from_soma[np.argmax(self.polynomial_predicted_no_of_intersections)]\n \n\n def critical_value(self):\n # local maximum of the polynomial fit (Maximum no. of intersections)\n return round(np.max(self.polynomial_predicted_no_of_intersections), 2)\n \n\n def skewness(self):\n # indication of how symmetrical the polynomial distribution is around its mean \n x_ = preprocessing.PolynomialFeatures(degree=3, include_bias=False).fit_transform(self.non_zero_no_of_intersections.reshape((-1, 1)))\n return round(scipy.stats.skew(self.polynomial_model.predict(x_)), 2)\n \n\n def schoenen_ramification_index(self):\n # ratio between critical value and number of primary branches\n no_of_primary_branches = self.skeleton.get_primary_branches()[1]\n schoenen_ramification_index = self.critical_value()/no_of_primary_branches\n return round(schoenen_ramification_index, 2)\n \n\n def semi_log(self):\n # no. of intersections/circumference\n normalized_y = np.log(self.non_zero_no_of_intersections/(2*math.pi*self.non_zero_distances_from_soma))\n reshaped_x = self.non_zero_distances_from_soma.reshape((-1, 1))\n model = linear_model.LinearRegression().fit(reshaped_x, normalized_y)\n\n # predict y from the data\n x_new = self.non_zero_distances_from_soma\n y_new = model.predict(reshaped_x)\n r2 = model.score(reshaped_x, normalized_y)\n regression_intercept = model.intercept_\n regression_coefficient = -model.coef_[0]\n\n return r2, regression_intercept, regression_coefficient\n \n\n def log_log(self):\n # no. 
of intersections/circumference\n normalized_y = np.log(self.non_zero_no_of_intersections/(2*math.pi*self.non_zero_distances_from_soma))\n reshaped_x = self.non_zero_distances_from_soma.reshape((-1, 1))\n normalized_x = np.log(reshaped_x)\n model = linear_model.LinearRegression().fit(normalized_x, normalized_y)\n\n # predict y from the data\n x_new = normalized_x\n y_new = model.predict(normalized_x)\n r2 = model.score(normalized_x, normalized_y)\n regression_intercept = model.intercept_\n regression_coefficient = -model.coef_[0]\n\n return r2, regression_intercept, regression_coefficient\n \n\n def determination_ratio(self):\n semi_log_r2 = self.semi_log()[0]\n log_log_r2 = self.log_log()[0]\n determination_ratio = semi_log_r2/log_log_r2\n if determination_ratio>1:\n self.normalization_method=\"Semi-log\"\n else:\n self.normalization_method=\"Log-log\"\n \n\n def coefficient_of_determination(self):\n # how close the data are to the fitted regression (indicative of the level of explained variability in the data set)\n if self.normalization_method==\"Semi-log\":\n return round(self.semi_log()[0], 2)\n else:\n return round(self.log_log()[0], 2)\n \n\n def regression_intercept(self):\n # Y intercept of the logarithmic plot\n if self.normalization_method==\"Semi-log\":\n return round(self.semi_log()[1], 2)\n else:\n return round(self.log_log()[1], 2)\n\n\n def sholl_regression_coefficient(self):\n # rate of decay of no. of branches\n if self.normalization_method==\"Semi-log\":\n return round(self.semi_log()[2], 2)\n else:\n return round(self.log_log()[2], 2)\n\n \n\nclass analyze_cells:\n \"\"\"\n Extract features of all the cells in different groups and implement PCA and group level sholl analysis.\n \"\"\"\n\n def __init__(self, groups_folders, image_type, label, save_features=True, show_sholl_plot=True, shell_step_size=3):\n\n \"\"\"\n Args:\n\n groups_folders (list): name of the input data folders corresponding to different subgroups\n image_type (string): 'confocal' or 'DAB'\n label (dictionary): group labels to be used for pca plots\n save_features (True/False): create text files containing feature values\n show_sholl_plot (True/False): show group sholl plot\n shell_step_size (scalar): difference between concentric circles\n\n \"\"\"\n\n self.show_sholl_plot = show_sholl_plot\n self.image_type = image_type\n self.label = label\n self.shell_step_size = shell_step_size\n\n dataset = self.read_images(groups_folders)\n self.features = self.get_features(dataset)\n self.feature_names = ['surface_area', 'total_length', 'avg_process_thickness', 'convex_hull', 'no_of_forks', 'no_of_primary_branches', 'no_of_secondary_branches', \n 'no_of_tertiary_branches', 'no_of_quatenary_branches', 'no_of_terminal_branches', 'avg_length_of_primary_branches', 'avg_length_of_secondary_branches', \n 'avg_length_of_tertiary_branches', 'avg_length_of_quatenary_branches', 'avg_length_of_terminal_branches', \n 'critical_radius', 'critical_value', 'enclosing_radius', 'ramification_index', 'skewness', 'coefficient_of_determination', \n 'sholl_regression_coefficient', 'regression_intercept']\n if save_features==True:\n self.save_features()\n if show_sholl_plot==True:\n self.show_avg_sholl_plot(shell_step_size)\n\n\n def read_images(self, groups_folders):\n self.file_names=[]\n dataset=[]\n for group in groups_folders:\n group_data=[]\n for file in os.listdir(group):\n if not file.startswith('.'):\n self.file_names.append((group+'/'+file))\n image = io.imread(group+'/'+file)\n group_data.append(image)\n 
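# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a standalone, self-contained
# illustration of the semi-log vs. log-log Sholl regression implemented by
# semi_log(), log_log() and determination_ratio() above. The `radii` and
# `counts` arrays are hypothetical example data, not values produced by this
# module.
import math
import numpy as np
from sklearn import linear_model

radii = np.array([3.0, 6.0, 9.0, 12.0, 15.0, 18.0])   # shell radii (pixels)
counts = np.array([2.0, 5.0, 9.0, 7.0, 4.0, 1.0])     # intersections per shell

# Normalise intersection counts by shell circumference, as the methods above do.
log_density = np.log(counts / (2 * math.pi * radii))
x = radii.reshape(-1, 1)

semi_log_r2 = linear_model.LinearRegression().fit(x, log_density).score(x, log_density)
log_log_r2 = linear_model.LinearRegression().fit(np.log(x), log_density).score(np.log(x), log_density)

# Same decision rule as determination_ratio(): keep the model with the higher R^2.
method = "Semi-log" if semi_log_r2 / log_log_r2 > 1 else "Log-log"
# ---------------------------------------------------------------------------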
dataset.append(group_data)\n\n return dataset\n\n\n def get_features(self, dataset):\n dataset_features=[]\n self.targets=[]\n\n if self.show_sholl_plot==True:\n self.sholl_original_plots=[]\n self.sholl_polynomial_plots=[]\n self.polynomial_models=[]\n \n self.group_counts=[]\n cell_count=0\n for group_no, group in enumerate(dataset):\n group_cell_count=0\n for cell_no, cell_image in enumerate(group):\n\n print(self.file_names[cell_count])\n\n cell_count+=1\n group_cell_count+=1\n\n self.targets.append(group_no)\n\n cell_features=[]\n\n astrocyte = Cell(cell_image, self.image_type)\n skeleton = Skeleton(cell_image, self.image_type)\n sholl = Sholl(cell_image, self.image_type, self.shell_step_size)\n\n cell_features.append(astrocyte.surface_area())\n cell_features.append(skeleton.total_length())\n cell_features.append(skeleton.avg_process_thickness())\n cell_features.append(skeleton.convex_hull())\n cell_features.append(skeleton.get_no_of_forks())\n cell_features.append(skeleton.get_primary_branches()[1])\n cell_features.append(skeleton.get_secondary_branches()[1])\n cell_features.append(skeleton.get_tertiary_branches()[1])\n cell_features.append(skeleton.get_quatenary_branches()[1])\n cell_features.append(skeleton.get_terminal_branches()[1])\n cell_features.append(skeleton.get_primary_branches()[2])\n cell_features.append(skeleton.get_secondary_branches()[2])\n cell_features.append(skeleton.get_tertiary_branches()[2])\n cell_features.append(skeleton.get_quatenary_branches()[2])\n cell_features.append(skeleton.get_terminal_branches()[2])\n cell_features.append(sholl.critical_radius())\n cell_features.append(sholl.critical_value())\n cell_features.append(sholl.enclosing_radius())\n cell_features.append(sholl.schoenen_ramification_index())\n cell_features.append(sholl.skewness())\n cell_features.append(sholl.coefficient_of_determination())\n cell_features.append(sholl.sholl_regression_coefficient())\n cell_features.append(sholl.regression_intercept())\n\n if self.show_sholl_plot==True:\n self.sholl_original_plots.append((sholl.distances_from_soma, sholl.no_of_intersections))\n self.sholl_polynomial_plots.append((sholl.non_zero_distances_from_soma, sholl.non_zero_no_of_intersections))\n self.polynomial_models.append(sholl.polynomial_model)\n \n dataset_features.append(cell_features)\n\n self.group_counts.append(group_cell_count)\n return dataset_features\n\n\n def save_features(self):\n directory = os.getcwd()+'/Features'\n if os.path.exists(directory) and os.path.isdir(directory):\n shutil.rmtree(directory)\n os.mkdir(directory)\n else:\n os.mkdir(directory)\n\n def save_to_file(file_name, feature_name, feature_value):\n path = os.getcwd()+'/Features/'\n with open(path+feature_name+'.txt', 'a') as text_file:\n text_file.write(\"{} {} \\n\".format(file_name, feature_value))\n\n for cell_no, cell_features in enumerate(self.features):\n for feature_no, feature_val in enumerate(cell_features):\n save_to_file(self.file_names[cell_no], self.feature_names[feature_no], feature_val)\n\n\n def show_avg_sholl_plot(self, shell_step_size):\n original_plots_file = 'Original plots'\n polynomial_plots_file = 'Polynomial plots'\n\n directory = os.getcwd()+'/Sholl Results'\n\n if os.path.exists(directory) and os.path.isdir(directory):\n shutil.rmtree(directory)\n os.mkdir(directory)\n else:\n os.mkdir(directory)\n\n path = os.getcwd()+'/Sholl Results/'\n\n largest_radius = []\n no_of_intersections=[]\n\n with open(path+original_plots_file, 'w+') as text_file:\n for cell_no, plot in 
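# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): save_features() above
# writes one text file per feature; the same table can also be written as a
# single CSV, which is usually easier to reload for analysis. The
# `feature_names`, `file_names` and `features` values here are toy stand-ins
# for the attributes built above.
import csv

feature_names = ['surface_area', 'total_length']
file_names = ['groupA/cell1.png', 'groupA/cell2.png']
features = [[120.5, 88.0], [97.2, 75.4]]

with open('features.csv', 'w', newline='') as fh:
    writer = csv.writer(fh)
    writer.writerow(['file'] + feature_names)      # header row
    for fname, row in zip(file_names, features):
        writer.writerow([fname] + list(row))       # one row per cell
# ---------------------------------------------------------------------------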
enumerate(self.sholl_original_plots):\n text_file.write(\"{} {} {} \\n\".format(self.file_names[cell_no], plot[0], plot[1]))\n\n # # get the max radius of each cell, as smallest and mid-level ones can be inferred from shell_step_size\n # largest_radius.append(max(plot[0]))\n # no_of_intersections.append(plot[1])\n\n with open(path+polynomial_plots_file, 'w+') as text_file:\n for cell_no, plot in enumerate(self.sholl_polynomial_plots):\n text_file.write(\"{} {} {} \\n\".format(self.file_names[cell_no], plot[0], plot[1]))\n\n # get the max radius of each cell, as smallest and mid-level ones can be inferred from shell_step_size\n largest_radius.append(max(plot[0]))\n no_of_intersections.append(plot[1])\n\n group_radiuses=[]\n sholl_intersections=[]\n for group_no, count in enumerate(self.group_counts):\n group_count = sum(self.group_counts[:group_no+1])\n group_radius = max(largest_radius[group_count-count:group_count])\n group_radiuses.append(group_radius)\n\n current_intersections = no_of_intersections[group_count-count:group_count]\n current_radiuses = range(shell_step_size, group_radius+1, shell_step_size)\n\n intersection_dict = defaultdict(list)\n for intersections in current_intersections:\n for i, intersection_val in enumerate(intersections):\n intersection_dict[current_radiuses[i]].append(intersection_val) \n sholl_intersections.append(intersection_dict)\n\n with open(path+\"Sholl values\", 'w') as text_file:\n for group_no, group_sholl in enumerate(sholl_intersections):\n text_file.write(\"Group: {}\\n\".format(group_no))\n for radius, intersections in group_sholl.items():\n text_file.write(\"{} {}\\n\".format(radius, intersections))\n\n for group_no, group_sholl in enumerate(sholl_intersections):\n x=[]\n y=[]\n e=[]\n for radius, intersections in group_sholl.items():\n x.append(radius)\n intersections = (intersections + self.group_counts[group_no] * [0])[:self.group_counts[group_no]]\n y.append(np.mean(intersections))\n e.append(scipy.stats.sem(intersections))\n plt.errorbar(x, y, yerr=e, label=self.label[group_no])\n\n plt.xlabel(\"Distance from soma\")\n plt.ylabel(\"No. 
of intersections\")\n plt.legend()\n plt.show()\n\n\n def pca(self, color_dict, marker):\n\n self.marker = marker\n\n def get_cov_ellipse(cov, centre, nstd, **kwargs):\n \"\"\"\n Return a matplotlib Ellipse patch representing the covariance matrix\n cov centred at centre and scaled by the factor nstd.\n\n \"\"\"\n\n # Find and sort eigenvalues and eigenvectors into descending order\n eigvals, eigvecs = np.linalg.eigh(cov)\n order = eigvals.argsort()[::-1]\n eigvals, eigvecs = eigvals[order], eigvecs[:, order]\n\n # The anti-clockwise angle to rotate our ellipse by \n vx, vy = eigvecs[:,0][0], eigvecs[:,0][1]\n theta = np.arctan2(vy, vx)\n\n # Width and height of ellipse to draw\n width, height = nstd * np.sqrt(eigvals)\n\n return Ellipse(xy=centre, width=width, height=height, angle=np.degrees(theta), **kwargs)\n\n\n pca_object = decomposition.PCA(2)\n\n # Scale data\n scaler = preprocessing.MaxAbsScaler()\n scaler.fit(self.features)\n X=scaler.transform(self.features)\n\n # fit on data\n pca_object.fit(X)\n\n # access values and vectors\n self.feature_significance = pca_object.components_\n\n # variance captured by principal components\n first_component_var = pca_object.explained_variance_ratio_[0]\n second_component_var = pca_object.explained_variance_ratio_[1]\n\n # transform data\n self.projected = pca_object.transform(X)\n\n first_component=self.projected[:,0]\n second_component=self.projected[:,1]\n\n with open(\"pca values\", 'w') as text_file:\n text_file.write(\"First component:\\n{}\\nSecond component:\\n{}\".format(first_component, second_component))\n\n\n no_of_std = 3 # no. of standard deviations to show\n fig, ax = plt.subplots()\n fig.patch.set_facecolor('white')\n for l in np.unique(self.targets):\n ix = np.where(self.targets==l)\n first_component_mean = np.mean(first_component[ix])\n second_component_mean = np.mean(second_component[ix])\n cov = np.cov(first_component, second_component)\n ax.scatter(first_component[ix], second_component[ix], c=color_dict[l], s=40, label=self.label[l], marker=marker[l])\n e = get_cov_ellipse(cov, (first_component_mean, second_component_mean), no_of_std, fc=color_dict[l], alpha=0.4)\n ax.add_artist(e)\n\n plt.xlabel(\"PC 1 (Variance: %.1f%%)\" % (first_component_var*100), fontsize=14)\n plt.ylabel(\"PC 2 (Variance: %.1f%%)\" % (second_component_var*100), fontsize=14)\n plt.legend()\n plt.show()\n\n\n def plot_feature_histograms(self):\n fig, axes = plt.subplots(12, 2, figsize=(15, 12)) # 2 columns each containing 13 figures, total 22 features\n data = np.array(self.features)\n ko = data[np.where(np.array(self.targets) == 0)[0]] # define ko\n control = data[np.where(np.array(self.targets) == 1)[0]] # define control\n ax=axes.ravel() # flat axes with numpy ravel\n\n for i in range(len(self.feature_names)):\n _, bins=np.histogram(data[:,i], bins=40)\n ax[i].hist(ko[:,i], bins=bins, color='r',alpha=.5) # red color for malignant class\n ax[i].hist(control[:,i], bins=bins, color='g',alpha=0.3) # alpha is for transparency in the overlapped region \n ax[i].set_title(self.feature_names[i],fontsize=9)\n ax[i].axes.get_xaxis().set_visible(False) # the x-axis co-ordinates are not so useful, as we just want to look how well separated the histograms are\n ax[i].set_yticks(())\n \n ax[0].legend(self.marker, loc='best', fontsize=8)\n plt.tight_layout() # let's make good plots\n plt.show()\n\n\n def plot_feature_significance_heatmap(self):\n sorted_significance_order = np.flip(np.argsort(abs(self.feature_significance[0])))\n sorted_feature_significance = 
np.zeros(self.feature_significance.shape)\n sorted_feature_significance[0] = np.array(self.feature_significance[0])[sorted_significance_order]\n sorted_feature_significance[1] = np.array(self.feature_significance[1])[sorted_significance_order]\n sorted_feature_names = np.array(self.feature_names)[sorted_significance_order]\n\n plt.matshow(np.array(sorted_feature_significance), cmap='gist_heat')\n plt.yticks([0,1], ['1st Comp','2nd Comp'], fontsize=10)\n plt.colorbar()\n plt.xticks(range(len(sorted_feature_names)), sorted_feature_names, rotation=65, ha='left')\n plt.show()\n\n\n def plot_feature_significance_vectors(self):\n score = self.projected\n coeff = np.transpose(self.feature_significance)\n labels=self.feature_names\n xs = score[:,0]\n ys = score[:,1]\n n = coeff.shape[0]\n scalex = 1.0/(xs.max() - xs.min())\n scaley = 1.0/(ys.max() - ys.min())\n\n plt.figure(figsize=(10, 9))\n ax = plt.axes()\n ax.scatter(xs * scalex,ys * scaley, c = self.targets)\n for i in range(n):\n ax.arrow(0, 0, coeff[i,0], coeff[i,1],color = 'r',alpha = 0.5)\n if labels is None:\n ax.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, \"Var\"+str(i+1), color = 'g', ha = 'center', va = 'center')\n else:\n ax.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, labels[i], color = 'g', ha = 'center', va = 'center')\n ax.set_xlabel(\"PC {}\".format(1))\n ax.set_ylabel(\"PC {}\".format(2))\n plt.show()\n\n\n\n", "sub_path": "morphanalysis.py", "file_name": "morphanalysis.py", "file_ext": "py", "file_size_in_byte": 44455, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "skimage.color.rgb2gray", "line_number": 49, "usage_type": "call"}, {"api_name": "skimage.color", "line_number": 49, "usage_type": "attribute"}, {"api_name": "skimage.util.invert", "line_number": 50, "usage_type": "call"}, {"api_name": "skimage.util", "line_number": 50, "usage_type": "attribute"}, {"api_name": "skimage.util.invert", "line_number": 53, "usage_type": "call"}, {"api_name": "skimage.util", "line_number": 53, "usage_type": "attribute"}, {"api_name": "skimage.feature.blob_log", "line_number": 62, "usage_type": "call"}, {"api_name": "skimage.feature.blob_log", "line_number": 64, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.take", "line_number": 70, "usage_type": "call"}, {"api_name": "skimage.feature.blob_log", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.indices", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.argmin", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 120, "usage_type": "call"}, {"api_name": "skimage.color.rgb2gray", "line_number": 141, "usage_type": "call"}, {"api_name": "skimage.color", "line_number": 141, "usage_type": "attribute"}, {"api_name": "skimage.transform.match_histograms", "line_number": 142, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 142, "usage_type": "attribute"}, {"api_name": "numpy.percentile", "line_number": 145, "usage_type": "call"}, {"api_name": "skimage.exposure.rescale_intensity", "line_number": 146, "usage_type": "call"}, {"api_name": "skimage.exposure", "line_number": 146, "usage_type": "attribute"}, {"api_name": "skimage.filters.threshold_otsu", 
"line_number": 148, "usage_type": "call"}, {"api_name": "skimage.util.invert", "line_number": 153, "usage_type": "call"}, {"api_name": "skimage.util", "line_number": 153, "usage_type": "attribute"}, {"api_name": "skimage.morphology.closing", "line_number": 157, "usage_type": "call"}, {"api_name": "skimage.morphology.square", "line_number": 157, "usage_type": "call"}, {"api_name": "skimage.measure.label", "line_number": 159, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 159, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 172, "usage_type": "call"}, {"api_name": "scipy.ndimage.binary_fill_holes", "line_number": 178, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 178, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 182, "usage_type": "call"}, {"api_name": "skimage.morphology.skeletonize", "line_number": 217, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 217, "usage_type": "attribute"}, {"api_name": "numpy.nonzero", "line_number": 223, "usage_type": "call"}, {"api_name": "skimage.util.pad", "line_number": 237, "usage_type": "call"}, {"api_name": "skimage.util", "line_number": 237, "usage_type": "attribute"}, {"api_name": "numpy.ndenumerate", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 248, "usage_type": "call"}, {"api_name": "skimage.morphology.convex_hull_image", "line_number": 256, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 256, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 262, "usage_type": "call"}, {"api_name": "skan.skeleton_to_csgraph", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 270, "usage_type": "call"}, {"api_name": "scipy.ndimage.generate_binary_structure", "line_number": 271, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 271, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.label", "line_number": 272, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 272, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 275, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 278, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 278, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 283, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 287, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 287, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 288, "usage_type": "name"}, {"api_name": "numpy.delete", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 360, "usage_type": "call"}, {"api_name": "skan.csr.Skeleton", "line_number": 369, "usage_type": "call"}, {"api_name": "skan.csr", "line_number": 
369, "usage_type": "attribute"}, {"api_name": "skan.csr.Skeleton", "line_number": 370, "usage_type": "call"}, {"api_name": "skan.csr", "line_number": 370, "usage_type": "attribute"}, {"api_name": "skan.csr.Skeleton", "line_number": 376, "usage_type": "call"}, {"api_name": "skan.csr", "line_number": 376, "usage_type": "attribute"}, {"api_name": "skan.skeleton_to_csgraph", "line_number": 392, "usage_type": "call"}, {"api_name": "skan.csr.branch_statistics", "line_number": 393, "usage_type": "call"}, {"api_name": "skan.csr", "line_number": 393, "usage_type": "attribute"}, {"api_name": "skan.csr.Skeleton", "line_number": 394, "usage_type": "call"}, {"api_name": "skan.csr", "line_number": 394, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 436, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 436, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 445, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 445, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 449, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 449, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 450, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 450, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 541, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 543, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 544, "usage_type": "call"}, {"api_name": "numpy.ndenumerate", "line_number": 547, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 550, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 555, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 561, "usage_type": "call"}, {"api_name": "skimage.measure.label", "line_number": 565, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 566, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 578, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 581, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 588, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 588, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 592, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 592, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 595, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 595, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 596, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 596, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 599, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 599, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 600, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 600, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 601, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 601, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 602, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 602, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 604, 
"usage_type": "call"}, {"api_name": "numpy.max", "line_number": 611, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 611, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 618, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 618, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 620, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 620, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 629, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 629, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 630, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 630, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 636, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 636, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 648, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 653, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 658, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 658, "usage_type": "name"}, {"api_name": "scipy.stats.skew", "line_number": 659, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 659, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 671, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 671, "usage_type": "attribute"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 673, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 673, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 687, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 687, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 689, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 690, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 690, "usage_type": "name"}, {"api_name": "skimage.measure.label", "line_number": 758, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 779, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 782, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 782, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 853, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 854, "usage_type": "call"}, {"api_name": "os.path", "line_number": 854, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 854, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 855, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 856, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 858, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 861, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 874, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 876, "usage_type": "call"}, {"api_name": "os.path", "line_number": 876, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 876, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 877, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 878, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 880, "usage_type": "call"}, {"api_name": 
"os.getcwd", "line_number": 882, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 913, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 932, "usage_type": "call"}, {"api_name": "scipy.stats.sem", "line_number": 933, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 933, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 934, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 934, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 936, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 936, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 937, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 937, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 938, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 938, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 939, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 939, "usage_type": "name"}, {"api_name": "numpy.linalg.eigh", "line_number": 954, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 954, "usage_type": "attribute"}, {"api_name": "numpy.arctan2", "line_number": 960, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 963, "usage_type": "call"}, {"api_name": "matplotlib.patches.Ellipse", "line_number": 965, "usage_type": "call"}, {"api_name": "numpy.degrees", "line_number": 965, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 968, "usage_type": "call"}, {"api_name": "sklearn.decomposition", "line_number": 968, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.MaxAbsScaler", "line_number": 971, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 971, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 996, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 996, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 998, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 999, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 1000, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 1001, "usage_type": "call"}, {"api_name": "numpy.cov", "line_number": 1002, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 1007, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1007, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 1008, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1008, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 1009, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1009, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1010, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1010, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 1014, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1014, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 1015, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1016, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1016, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 
1017, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1017, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 1021, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 1029, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1029, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1030, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1030, "usage_type": "name"}, {"api_name": "numpy.flip", "line_number": 1034, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 1034, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1035, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1036, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1037, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1038, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.matshow", "line_number": 1040, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1040, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 1040, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 1041, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1041, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 1042, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1042, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 1043, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1043, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1044, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1044, "usage_type": "name"}, {"api_name": "numpy.transpose", "line_number": 1049, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 1057, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1057, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 1058, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1058, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1068, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1068, "usage_type": "name"}]} {"seq_id": "67954046", "text": "# Import `Site` and the `ui` module from the `h2o_wave` package\nfrom h2o_wave import site, ui,main\nimport time\nimport requests\n\ndef mifun(text):\n if len(text)<1:\n return text\n else:\n response = requests.post('http://api/ner', data = {'text':'Each component, API-Service'})\n #print(response)\n return response\n# Get the web page at route '/demo'.\n# If you're running this example on your local machine,\n# this page will refer to http://localhost:10101/demo.\npage = site['/ner_ui']\n\n# Add a Markdown card named `hello` to the page.\n# page['hello'] = ui.markdown_card(\n# box='1 1 2 2',\n# title='Hello World!',\n# content='And now for something completely different!',\n# )\ntext='asdf'\npage['ner'] = ui.form_card(box='1 1 4 10', items=[\n ui.textbox(name='text', label='English', value=text or '', multiline=True),\n ui.label('Pig Latin'),\n ui.text(mifun(text) or '*Type in some text above to translate to Pig Latin!*'),\n ])\n\nbeer_verse = '''={{before}} bottles of beer on the wall, {{before}} bottles of beer.\n\nTake one down, pass it around, {{after}} bottles of beer on the wall...\n'''\n\nbeer_card = 
page.add('wall', ui.markdown_card(\n box='6 1 4 2',\n title='99 Bottles of Beer',\n content=beer_verse,\n data=dict(before='99', after='98'),\n))\n\n\n\nfor i in range(99, 0, -1):\n beer_card.data.before = str(i)\n beer_card.data.after = str(i - 1)\n page.save()\n time.sleep(1)\n\n\n \n\n\n\n\n", "sub_path": "ner_ui.py", "file_name": "ner_ui.py", "file_ext": "py", "file_size_in_byte": 1460, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "requests.post", "line_number": 10, "usage_type": "call"}, {"api_name": "h2o_wave.site", "line_number": 16, "usage_type": "name"}, {"api_name": "h2o_wave.ui.form_card", "line_number": 25, "usage_type": "call"}, {"api_name": "h2o_wave.ui", "line_number": 25, "usage_type": "name"}, {"api_name": "h2o_wave.ui.textbox", "line_number": 26, "usage_type": "call"}, {"api_name": "h2o_wave.ui", "line_number": 26, "usage_type": "name"}, {"api_name": "h2o_wave.ui.label", "line_number": 27, "usage_type": "call"}, {"api_name": "h2o_wave.ui", "line_number": 27, "usage_type": "name"}, {"api_name": "h2o_wave.ui.text", "line_number": 28, "usage_type": "call"}, {"api_name": "h2o_wave.ui", "line_number": 28, "usage_type": "name"}, {"api_name": "h2o_wave.ui.markdown_card", "line_number": 36, "usage_type": "call"}, {"api_name": "h2o_wave.ui", "line_number": 36, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}]} {"seq_id": "540076167", "text": "# -*- encoding: utf-8 -*-\n\nimport base64\n\nimport time\nfrom datetime import datetime, timedelta\nfrom dateutil.relativedelta import relativedelta\n\n\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nfrom openerp import tools\n\nimport logging\n\n# _logger = logging.getLogger(__name__)\n\nclass account_rus_import(osv.osv_memory):\n _name = 'account.rus.import'\n _description = 'Import Russian statements file'\n _columns = {\n 'file_data': fields.binary('Import statements File', required=True),\n 'file_fname': fields.char('Import statements Filename', size=128, required=True),\n 'note': fields.text('Log'),\n 'temporary_account_id': fields.many2one('account.account', 'Temporary Account', help=\"It acts as a temporary account for general amount\", required=True), # domain=\"[('type','!=','view')]\"\n }\n\n def _get_default_tmp_account(self, cr, uid, context):\n tmp_accounts = self.pool.get('account.account').search(cr, uid, [('code', '=', '490000')])\n if tmp_accounts and len(tmp_accounts) > 0:\n tmp_account_id = tmp_accounts[0]\n else:\n tmp_account_id = False\n return tmp_account_id\n\n _defaults = {\n 'file_fname': lambda *a: '',\n 'temporary_account_id': _get_default_tmp_account,\n }\n\n def file_parsing(self, cr, uid, ids, context=None, batch=False, rfile=None, rfilename=None):\n\n def acc_number_parsing(obj, line_number):\n acc_res = {}\n acc_res_number = ''\n while rmspaces(obj[line_number][0]) != u'ะšะพะฝะตั†ะ ะฐัั‡ะกั‡ะตั‚': # !!! 
ะ”ะพะบัƒะผะตะฝั‚\n if obj[line_number][0] == u'ะ”ะฐั‚ะฐะะฐั‡ะฐะปะฐ':\n acc_res['begin_date'] = time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT,\n time.strptime(rmspaces(obj[line_number][1]), '%d.%m.%Y'))\n elif obj[line_number][0] == u'ะ”ะฐั‚ะฐะšะพะฝั†ะฐ':\n acc_res['end_date'] = time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT,\n time.strptime(rmspaces(obj[line_number][1]), '%d.%m.%Y'))\n elif obj[line_number][0] == u'ะ ะฐัั‡ะกั‡ะตั‚':\n acc_res_number = rmspaces(obj[line_number][1])\n elif obj[line_number][0] == u'ะะฐั‡ะฐะปัŒะฝั‹ะนะžัั‚ะฐั‚ะพะบ':\n acc_res['balance_start'] = float(rmspaces(obj[line_number][1]))\n elif obj[line_number][0] == u'ะ’ัะตะณะพะŸะพัั‚ัƒะฟะธะปะพ':\n acc_res['balance_plus'] = float(rmspaces(obj[line_number][1]))\n elif obj[line_number][0] == u'ะ’ัะตะณะพะกะฟะธัะฐะฝะพ':\n acc_res['balance_minus'] = float(rmspaces(obj[line_number][1]))\n elif obj[line_number][0] == u'ะšะพะฝะตั‡ะฝั‹ะนะžัั‚ะฐั‚ะพะบ':\n acc_res['balance_end'] = float(rmspaces(obj[line_number][1]))\n line_number += 1\n line_number += 1\n return acc_res_number, acc_res, line_number\n\n if context is None:\n context = {}\n if batch:\n rfile = str(rfile)\n rfilename = rfilename\n else:\n data = self.browse(cr, uid, ids)[0]\n try:\n rfile = data.file_data\n rfilename = data.file_fname\n temporaryaccount = data.temporary_account_id.id\n except:\n raise osv.except_osv(_('Error'), _('Wizard in incorrect state. Please hit the Cancel button'))\n # return {}\n recordlist = unicode(base64.decodestring(rfile), 'windows-1251', 'strict').split('\\n')\n strobj = []\n for line in recordlist:\n strobj.append(line.split('='))\n\n format_import_file = ''\n encoding_file = ''\n statements = {}\n note = []\n inc_desc = 1\n if rmspaces(recordlist[0]) != '1CClientBankExchange':\n raise osv.except_osv(_('Error'), _('Incorrect description of import file'))\n\n if strobj[inc_desc][0] == u'ะ’ะตั€ัะธัะคะพั€ะผะฐั‚ะฐ':\n format_import_file = rmspaces(strobj[inc_desc][1])\n note.append(recordlist[inc_desc] + '\\n')\n inc_desc += 1\n else:\n raise osv.except_osv(_('Error'), _('Incorrect description of import file'))\n\n if strobj[inc_desc][0] == u'ะšะพะดะธั€ะพะฒะบะฐ':\n encoding_file = rmspaces(strobj[inc_desc][1])\n note.append(recordlist[inc_desc] + '\\n')\n inc_desc += 1\n else:\n raise osv.except_osv(_('Error'), _('Incorrect description of import file'))\n\n if strobj[inc_desc][0] == u'ะžั‚ะฟั€ะฐะฒะธั‚ะตะปัŒ':\n note.append(recordlist[inc_desc] + '\\n')\n inc_desc += 1\n if strobj[inc_desc][0] == u'ะŸะพะปัƒั‡ะฐั‚ะตะปัŒ':\n note.append(recordlist[inc_desc] + '\\n')\n inc_desc += 1\n else:\n raise osv.except_osv(_('Error'), _('Incorrect description of import file'))\n \n if strobj[inc_desc][0] == u'ะ”ะฐั‚ะฐะกะพะทะดะฐะฝะธั':\n note.append(recordlist[inc_desc] + '\\n')\n inc_desc += 1\n if strobj[inc_desc][0] == u'ะ’ั€ะตะผัะกะพะทะดะฐะฝะธั':\n note.append(recordlist[inc_desc] + '\\n')\n inc_desc += 1\n\n if strobj[inc_desc][0] == u'ะ”ะฐั‚ะฐะะฐั‡ะฐะปะฐ':\n note.append(recordlist[inc_desc] + '\\n')\n statements['begin_date'] = time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT,\n time.strptime(rmspaces(strobj[inc_desc][1]), '%d.%m.%Y'))\n tmp_acc, tmp_acc_res, slide_i = acc_number_parsing(strobj, inc_desc)\n inc_desc += 1\n else:\n raise osv.except_osv(_('Error'), _('Incorrect description of import file'))\n if strobj[inc_desc][0] == u'ะ”ะฐั‚ะฐะšะพะฝั†ะฐ':\n note.append(recordlist[inc_desc] + '\\n')\n statements['end_date'] = time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT,\n time.strptime(rmspaces(strobj[inc_desc][1]), 
'%d.%m.%Y'))\n inc_desc += 1\n else:\n raise osv.except_osv(_('Error'), _('Incorrect description of import file'))\n acc_numbers = []\n while strobj[inc_desc][0] == u'ะ ะฐัั‡ะกั‡ะตั‚':\n acc_number = {}\n acc_number['detail'] = []\n acc_number['statement_line'] = []\n acc_number['acc_number'] = rmspaces(strobj[inc_desc][1])\n acc_number['journal_id'] = False\n acc_number['bank_account'] = False\n bank_ids = self.pool.get('res.partner.bank').search(cr, uid,\n [('acc_number', '=', acc_number['acc_number'])])\n if bank_ids and len(bank_ids) > 0:\n bank_accs = self.pool.get('res.partner.bank').browse(cr, uid, bank_ids)\n for bank_acc in bank_accs:\n if bank_acc.journal_id.id:\n acc_number['journal_id'] = bank_acc.journal_id\n acc_number['bank_account'] = bank_acc\n break\n if not acc_number['bank_account']:\n raise osv.except_osv(_('Error'), _(\"No matching Bank Account (with Account Journal) found.\\n\\n\"\n \"Please set-up a Bank Account with as Account Number '%s' \"\n \"and an Account Journal.\") % (acc_number['acc_number']))\n acc_numbers.append(acc_number)\n inc_desc += 1\n\n def statement_line_parsing(obj, line_number):\n statementLine = {}\n statementLine['note'] = []\n while rmspaces(obj[line_number][0]) != u'ะšะพะฝะตั†ะ”ะพะบัƒะผะตะฝั‚ะฐ':\n if obj[line_number][0] == u'ะะพะผะตั€':\n statementLine['ref'] = rmspaces(obj[line_number][1])\n elif obj[line_number][0] == u'ะ”ะฐั‚ะฐ':\n statementLine['date'] = time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT, time.strptime(rmspaces(obj[line_number][1]),'%d.%m.%Y'))\n elif obj[line_number][0] == u'ะกัƒะผะผะฐ':\n statementLine['amount'] = rmspaces(obj[line_number][1])\n elif obj[line_number][0] == u'ะŸะปะฐั‚ะตะปัŒั‰ะธะบะกั‡ะตั‚':\n statementLine['payer_acc'] = rmspaces(obj[line_number][1])\n elif obj[line_number][0] == u'ะŸะปะฐั‚ะตะปัŒั‰ะธะบ':\n statementLine['payer'] = rmspaces(obj[line_number][1])\n elif obj[line_number][0] == u'ะŸะปะฐั‚ะตะปัŒั‰ะธะบะ˜ะะ':\n statementLine['payer_inn'] = rmspaces(obj[line_number][1])\n elif obj[line_number][0] == u'ะŸะพะปัƒั‡ะฐั‚ะตะปัŒะกั‡ะตั‚':\n statementLine['recipient_acc'] = rmspaces(obj[line_number][1])\n elif obj[line_number][0] == u'ะŸะพะปัƒั‡ะฐั‚ะตะปัŒ':\n statementLine['recipient'] = rmspaces(obj[line_number][1])\n elif obj[line_number][0] == u'ะŸะพะปัƒั‡ะฐั‚ะตะปัŒะ˜ะะ':\n statementLine['recipient_inn'] = rmspaces(obj[line_number][1])\n elif obj[line_number][0] == u'ะะฐะทะฝะฐั‡ะตะฝะธะตะŸะปะฐั‚ะตะถะฐ':\n statementLine['name'] = rmspaces(obj[line_number][1])\n else:\n statementLine['note'].append(obj[line_number][0] + ': ' + obj[line_number][1])\n line_number += 1\n line_number += 1\n return statementLine, line_number\n\n #for i in range(inc_desc,len(strobj)-1):\n i = inc_desc\n while rmspaces(strobj[i][0]) != u'ะšะพะฝะตั†ะคะฐะนะปะฐ':\n # _logger = logging.getLogger(__name__)\n # _logger.info('ENDFILE %s %s %s', i, len(strobj)-1, strobj[i][0])\n if strobj[i][0] == u'ะกะตะบั†ะธัะ ะฐัั‡ะกั‡ะตั‚': # ะ”ะพะบัƒะผะตะฝั‚\n # _logger = logging.getLogger(__name__)\n # _logger.info('SEC RASH')\n for acc_one in acc_numbers:\n if acc_one['acc_number'] == tmp_acc:\n acc_one['detail'].append(tmp_acc_res)\n break\n\n elif strobj[i][0] == u'ะกะตะบั†ะธัะ”ะพะบัƒะผะตะฝั‚':\n tmp_statementLine, slide_i = statement_line_parsing(strobj, i)\n # _logger = logging.getLogger(__name__)\n # _logger.info('SEC DOC')\n for acc_one in acc_numbers:\n if acc_one['acc_number'] == tmp_statementLine['payer_acc'] or \\\n acc_one['acc_number'] == tmp_statementLine['recipient_acc']:\n tmp_statementLine['sequence'] = 
len(acc_one['statement_line']) + 1\n acc_one['statement_line'].append(tmp_statementLine)\n break\n if i < (len(strobj)-2):\n i += 1\n\n for slide_i, statement in enumerate(acc_numbers):\n period_id = self.pool.get('account.period').search(cr, uid,\n [('company_id', '=', statement['journal_id'].company_id.id),\n ('date_start', '<=', statements['end_date']),\n ('date_stop', '>=', statements['end_date'])])\n if not period_id and len(period_id) == 0:\n raise osv.except_osv(_('Error'), _(\"The Statement New Balance date doesn't fall within a defined Accounting Period! Please create the Accounting Period for date %s for the company %s.\") % (statements['end_date'], statement['journal_id'].company_id.name))\n statement['period_id'] = period_id[0]\n\n statement['note'] = note\n cr.execute('SELECT balance_end_real \\\n FROM account_bank_statement \\\n WHERE journal_id = %s and date <= %s \\\n ORDER BY date DESC,id DESC LIMIT 1', (statement['journal_id'].id, statements['begin_date']))\n res = cr.fetchone()\n balance_start_check = res and res[0]\n if balance_start_check == None:\n if statement['journal_id'].default_debit_account_id and (statement['journal_id'].default_credit_account_id == statement['journal_id'].default_debit_account_id):\n balance_start_check = statement['journal_id'].default_debit_account_id.balance\n else:\n raise osv.except_osv(_('Error'), _(\"Configuration Error in journal %s!\\nPlease verify the Default Debit and Credit Account settings.\") % statement['journal_id'].name)\n if balance_start_check != tmp_acc_res['balance_start']: # statement['balance_start']\n statement['note'].append(_(\"The Statement %s Starting Balance (%.2f) does not correspond with the previous Closing Balance (%.2f) in journal %s!\") % (statement['acc_number'] + ' #' + statements['begin_date'] + ':' + statements['end_date'], tmp_acc_res['balance_start'], balance_start_check, statement['journal_id'].name))\n if not(statement.get('period_id')):\n raise osv.except_osv(_('Error'), _(' No transactions or no period in file !'))\n data = {\n 'name': statement['acc_number'] + ' #' + statements['begin_date'] + ':' + statements['end_date'],\n 'date': datetime.now(),\n 'journal_id': statement['journal_id'].id,\n 'period_id': statement['period_id'],\n 'balance_start': tmp_acc_res['balance_start'], # 'balance_start': statement['balance_start'],\n 'balance_end_real': tmp_acc_res['balance_end'],\n }\n statement['id'] = self.pool.get('account.bank.statement').create(cr, uid, data, context=context)\n for line in statement['statement_line']:\n partner = None\n partner_id = None\n invoice = False\n if line['payer_acc'] == statement['acc_number']:\n pass\n else:\n ids = self.pool.get('res.partner.bank').search(cr, uid, [('acc_number', '=', str(line['payer_acc']))])\n _logger = logging.getLogger(__name__)\n _logger.info('IDS %s', ids)\n p_id = None\n\n if ids and len(ids) > 0:\n partner = self.pool.get('res.partner.bank').browse(cr, uid, ids[0], context=context)\n _logger = logging.getLogger(__name__)\n _logger.info('PARTNER %s', partner)\n p_id = partner.partner_id.id\n line['account'] = partner.partner_id.property_account_receivable.id\n ids = ids[0]\n else:\n _logger = logging.getLogger(__name__)\n _logger.info('PARTNER_NO %s', ids)\n ids = ''\n if not partner and not invoice:\n line['account'] = temporaryaccount\n line['name'] = line['name'] + '\\n' + line['payer_inn']\n data = {\n 'name': line['name'],\n 'note': '\\n'.join(line['note']),\n 'date': line['date'],\n 'amount': line['amount'],\n 'partner_id': p_id,\n 
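# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): the wizard above walks a
# 1CClientBankExchange export line by line, splitting on '='. This is a minimal
# standalone parser for the 'ะกะตะบั†ะธัะ”ะพะบัƒะผะตะฝั‚' blocks of such a file; the sample
# payload is invented for illustration.
sample = u"""1CClientBankExchange
ะกะตะบั†ะธัะ”ะพะบัƒะผะตะฝั‚=ะŸะปะฐั‚ะตะถะฝะพะตะŸะพั€ัƒั‡ะตะฝะธะต
ะะพะผะตั€=42
ะกัƒะผะผะฐ=1000.00
ะšะพะฝะตั†ะ”ะพะบัƒะผะตะฝั‚ะฐ
ะšะพะฝะตั†ะคะฐะนะปะฐ"""

documents, current = [], None
for raw in sample.splitlines():
    key, _, value = raw.partition('=')   # key only, for marker lines without '='
    if key == u'ะกะตะบั†ะธัะ”ะพะบัƒะผะตะฝั‚':
        current = {}
    elif key == u'ะšะพะฝะตั†ะ”ะพะบัƒะผะตะฝั‚ะฐ' and current is not None:
        documents.append(current)
        current = None
    elif current is not None:
        current[key] = value
# documents == [{u'ะะพะผะตั€': u'42', u'ะกัƒะผะผะฐ': u'1000.00'}]
# ---------------------------------------------------------------------------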
#'account_id': line['account'], #Looks like reconsilation will not work with it\n 'statement_id': statement['id'],\n 'ref': line['ref'],\n 'sequence': line['sequence'],\n 'bank_account_id': ids,\n }\n data_check = {\n 'date': line['date'],\n 'amount': line['amount'],\n 'ref': line['ref'],\n 'account_id': line['account'],\n }\n ids_line_statement = self.pool.get('account.bank.statement.line').search(cr, uid,\n [('date', '=', data_check['date']),\n ('amount', '=', data_check['amount']),\n ('ref', '=', data_check['ref']),\n #('account_id', '=', data_check['account_id'])\n ])\n if ids_line_statement:\n statement['note'].append(_('Statement line %s from %s alredy exist.') %\n (data_check['ref'], data_check['date']))\n else:\n self.pool.get('account.bank.statement.line').create(cr, uid, data, context=context)\n \n if statement['note']:\n self.pool.get('account.bank.statement').write(cr, uid, [statement['id']],\n {'rusimport_note': '\\n'.join(statement['note'])},\n context=context)\n model, action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid,\n 'account', 'action_bank_statement_tree')\n action = self.pool[model].browse(cr, uid, action_id, context=context)\n return {\n 'name': action.name,\n 'view_type': action.view_type,\n 'view_mode': action.view_mode,\n 'res_model': action.res_model,\n 'domain': action.domain,\n 'context': action.context,\n 'type': 'ir.actions.act_window',\n 'search_view_id': action.search_view_id.id,\n 'views': [(v.view_id.id, v.view_mode) for v in action.view_ids]\n }\n\n\n\ndef rmspaces(s):\n return \" \".join(s.split())", "sub_path": "csv_operations.py", "file_name": "csv_operations.py", "file_ext": "py", "file_size_in_byte": 18245, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "openerp.osv.osv.osv_memory", "line_number": 18, "usage_type": "attribute"}, {"api_name": "openerp.osv.osv", "line_number": 18, "usage_type": "name"}, {"api_name": "openerp.osv.fields.binary", "line_number": 22, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 22, "usage_type": "name"}, {"api_name": "openerp.osv.fields.char", "line_number": 23, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 23, "usage_type": "name"}, {"api_name": "openerp.osv.fields.text", "line_number": 24, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 24, "usage_type": "name"}, {"api_name": "openerp.osv.fields.many2one", "line_number": 25, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 25, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 48, "usage_type": "call"}, {"api_name": "openerp.tools.DEFAULT_SERVER_DATE_FORMAT", "line_number": 48, "usage_type": "attribute"}, {"api_name": "openerp.tools", "line_number": 48, "usage_type": "name"}, {"api_name": "time.strptime", "line_number": 49, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 51, "usage_type": "call"}, {"api_name": "openerp.tools.DEFAULT_SERVER_DATE_FORMAT", "line_number": 51, "usage_type": "attribute"}, {"api_name": "openerp.tools", "line_number": 51, "usage_type": "name"}, {"api_name": "time.strptime", "line_number": 52, "usage_type": "call"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 79, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 79, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 79, "usage_type": "call"}, {"api_name": "base64.decodestring", "line_number": 81, 
"usage_type": "call"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 92, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 92, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 92, "usage_type": "call"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 99, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 99, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 99, "usage_type": "call"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 106, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 106, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 106, "usage_type": "call"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 115, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 115, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 115, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 126, "usage_type": "call"}, {"api_name": "openerp.tools.DEFAULT_SERVER_DATE_FORMAT", "line_number": 126, "usage_type": "attribute"}, {"api_name": "openerp.tools", "line_number": 126, "usage_type": "name"}, {"api_name": "time.strptime", "line_number": 127, "usage_type": "call"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 131, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 131, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 131, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 134, "usage_type": "call"}, {"api_name": "openerp.tools.DEFAULT_SERVER_DATE_FORMAT", "line_number": 134, "usage_type": "attribute"}, {"api_name": "openerp.tools", "line_number": 134, "usage_type": "name"}, {"api_name": "time.strptime", "line_number": 135, "usage_type": "call"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 138, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 138, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 138, "usage_type": "call"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 157, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 157, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 157, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 170, "usage_type": "call"}, {"api_name": "openerp.tools.DEFAULT_SERVER_DATE_FORMAT", "line_number": 170, "usage_type": "attribute"}, {"api_name": "openerp.tools", "line_number": 170, "usage_type": "name"}, {"api_name": "time.strptime", "line_number": 170, "usage_type": "call"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 225, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 225, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 225, "usage_type": "call"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 239, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 239, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 239, "usage_type": "call"}, {"api_name": "openerp.tools.translate._", "line_number": 241, "usage_type": "call"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 243, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 243, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 243, "usage_type": "call"}, {"api_name": 
"datetime.datetime.now", "line_number": 246, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 246, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 261, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 267, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 273, "usage_type": "call"}, {"api_name": "openerp.tools.translate._", "line_number": 304, "usage_type": "call"}]} {"seq_id": "210365023", "text": "from django.shortcuts import render\nfrom django.shortcuts import get_object_or_404\nfrom store.models import *\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse,HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.db.models import Avg\n\n# Create your views here.\n\ndef index(request):\n return render(request, 'store/index.html')\n\ndef bookDetailView(request, bid):\n template_name = 'store/book_detail.html'\n try:\n book = Book.objects.get(pk=bid)\n except:\n return HttpResponse('
Requested Book Not Available!
')\n \n num_available = len(BookCopy.objects.filter(book__exact=book,status__exact=True))\n \n context = {\n 'book': book, # set this to an instance of the required book\n 'num_available': num_available, # set this to the number of copies of the book available, or 0 if the book isn't available\n }\n # START YOUR CODE HERE\n\n return render(request, template_name, context=context)\n\n\n@csrf_exempt\ndef bookListView(request):\n template_name = 'store/book_list.html'\n get_data = request.GET\n\n books = Book.objects.filter(\n title__icontains=get_data.get('title',''), \n author__icontains=get_data.get('author',''),\n genre__icontains=get_data.get('genre', '')\n )\n context = {\n 'books': books, # set this to the list of required books upon filtering using the GET parameters\n # (i.e. the book search feature will also be implemented in this view)\n }\n return render(request, template_name, context=context)\n\n@login_required\ndef viewLoanedBooks(request):\n template_name = 'store/loaned_books.html'\n books = BookCopy.objects.filter(borrower__exact=request.user)\n context = {\n 'books': books,\n }\n '''\n The above key 'books' in the context dictionary should contain a list of instances of the \n BookCopy model. Only those book copies should be included which have been loaned by the user.\n '''\n # START YOUR CODE HERE\n \n\n\n return render(request, template_name, context=context)\n\n@csrf_exempt\n@login_required\ndef loanBookView(request):\n \n '''\n Check if an instance of the asked book is available.\n If yes, then set the message to 'success', otherwise 'failure'\n '''\n # START YOUR CODE HERE\n if not request.user.is_authenticated:\n return render(request,'login.html',{\"message\":\"Login to loan book\"})\n \n book_id = request.POST['bid'] # get the book id from post data\n books = BookCopy.objects.filter(book_id__exact=book_id, status__exact=True)\n if books:\n books[0].borrower = request.user\n books[0].borrow_date = datetime.date.today()\n books[0].status = False\n books[0].save()\n message = \"success\"\n else:\n message = \"failure\"\n response_data = {\n 'message': message,\n }\n\n return JsonResponse(response_data)\n\n'''\nFILL IN THE BELOW VIEW BY YOURSELF.\nThis view will return the issued book.\nYou need to accept the book id as argument from a post request.\nYou additionally need to complete the returnBook function in the loaned_books.html file\nto make this feature complete\n''' \n@csrf_exempt\n@login_required\ndef returnBookView(request):\n if request.method == \"POST\":\n try:\n book_id = request.POST['bid']\n book = BookCopy.objects.get(pk=book_id)\n book.borrower = None\n book.borrow_date = None\n book.status = True\n book.save()\n return JsonResponse( {\"message\":\"Book returned successfully\"} )\n except:\n return JsonResponse( {\"message\":\"Book not found\"} )\n else:\n return JsonResponse( {\"message\":\"Invalid request method\"} )\n\n@csrf_exempt\n@login_required\ndef rateBookView(request):\n if request.method == \"POST\":\n book_id = request.POST['bid']\n username = request.user.username\n new_rating = request.POST['rating']\n book = Book.objects.get(pk=book_id)\n\n if new_rating>10 or new_rating<0:\n return JsonResponse({'message':'Rating not in range 0-10'})\n else:\n try:\n current_user_book_rating_object = BookRating.objects.get_or_create(book=book, username=username)\n current_user_book_rating_object.rating = new_rating\n current_user_book_rating_object.save()\n \n book.rating = BookRating.objects.filter(book = 
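# ---------------------------------------------------------------------------
# Editor's note (not part of the original file): two fixes the rateBookView
# above needs. `request.POST['rating']` is a string, so it must be converted
# before the 0-10 range check, and Django's `get_or_create` returns an
# (object, created) tuple, not the object itself. A corrected fragment, using
# the model names from the code above:
#
#     new_rating = int(request.POST['rating'])
#     rating_obj, _created = BookRating.objects.get_or_create(
#         book=book, username=username)
#     rating_obj.rating = new_rating
#     rating_obj.save()
# ---------------------------------------------------------------------------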
book).aggregate(rating=Avg('rating'))['rating']\n book.save() \n return JsonResponse({'message':'success'})\n except:\n return JsonResponse({'message':\"error\"})\n\n else:\n return JsonResponse({'message':\"Invalid request method\"})\n \n \n \n\n\n", "sub_path": "store/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4923, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.shortcuts.render", "line_number": 12, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 19, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 46, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 32, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 63, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 48, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 75, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 91, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 65, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 66, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 111, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 113, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 115, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 100, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 101, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 127, "usage_type": "call"}, {"api_name": "django.db.models.Avg", "line_number": 134, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 136, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 138, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 141, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 117, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 118, "usage_type": "name"}]} {"seq_id": "101027119", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport datetime\nimport re\nimport time\n\nfrom .base import * # noqa\n\n# don't use an unicode string\nlocaleID = 'ja_JP'\ndateSep = ['-']\ntimeSep = [':', 'h']\nmeridian = ['ๅˆๅ‰', 'ๅˆๅพŒ']\nusesMeridian = True\nuses24 = True\nWeekdayOffsets = {}\nMonthOffsets = {}\nuseAccuracy = False\ntryRegexParsing = True\n\n# always lowercase any lookup values - helper code expects that\nWeekdays = [\n 'ๆœˆๆ›œๆ—ฅ', '็ซๆ›œๆ—ฅ', 'ๆฐดๆ›œๆ—ฅ', 'ๆœจๆ›œๆ—ฅ', '้‡‘ๆ›œๆ—ฅ', 'ๅœŸๆ›œๆ—ฅ', 'ๆ—ฅๆ›œๆ—ฅ',\n]\n\nshortWeekdays = [\n 'ๆœˆ', '็ซ', 'ๆฐด', 'ๆœจ', '้‡‘', 'ๅœŸ', 'ๆ—ฅ',\n]\n\nMonths = [\n '1ๆœˆ', '2ๆœˆ', '3ๆœˆ', '4ๆœˆ', '5ๆœˆ', '6ๆœˆ',\n '7ๆœˆ', '8ๆœˆ', '9ๆœˆ', '10ๆœˆ', '11ๆœˆ', '12ๆœˆ',\n]\n\n# We do not list 'mar' as a short name for 'mars' as it conflicts with\n# the 'mar' of 'mardi'\nshortMonths = [\n '1ๆœˆ', '2ๆœˆ', '3ๆœˆ', '4ๆœˆ', '5ๆœˆ', '6ๆœˆ',\n '7ๆœˆ', '8ๆœˆ', '9ๆœˆ', '10ๆœˆ', '11ๆœˆ', '12ๆœˆ',\n]\n\n# use the same 
formats as ICU by default\ndateFormats = {\n 'full': 'yyyyๅนดMMMMๆœˆdๆ—ฅใฎEEEE',\n 'long': 'yyyy MMMM d',\n 'medium': 'yyyy MMM d',\n 'short': 'yy-M-d'\n}\n\ntimeFormats = {\n 'full': 'hh:mm:ss a z',\n 'long': 'h:mm:ss a z',\n 'medium': 'h:mm:ss a',\n 'short': 'h:mm a',\n}\n\ndp_order = ['y', 'm', 'd']\n\n# Used to parse expressions like \"in 5 hours\"\nnumbers = {\n '้›ถ': 0,\n 'ไธ€': 1,\n 'ไบŒ': 2,\n 'ไธ‰': 3,\n 'ๅ››': 4,\n 'ไบ”': 5,\n 'ๅ…ญ': 6,\n 'ไธƒ': 7,\n 'ๅ…ซ': 8,\n 'ไน': 9,\n 'ๅ': 10,\n}\n\ndecimal_mark = '.'\n\n# this will be added to re_values later\nunits = {\n 'seconds': ['็ง’'],\n 'minutes': ['ๅˆ†'],\n 'hours': ['ๆ™‚'],\n 'days': ['ๆ—ฅ'],\n 'weeks': ['้€ฑ'],\n 'months': ['ๆœˆ'],\n 'years': ['ๅนด'],\n}\n\n# text constants to be used by later regular expressions\nre_values = {\n 'specials': '',\n 'timeseparator': \":\",\n 'rangeseparator': '-',\n 'daysuffix': 'ๆ—ฅ',\n 'meridian': 'ๅˆๅ‰|ๅคœๆ˜Žใ‘|ๅˆๅพŒ|ๆ˜ผ|ๅคœ',\n 'qunits': 'ๆ™‚|ๅˆ†|็ง’|ๆ—ฅ|้€ฑ|ๆœˆ|ๅนด',\n 'now': ['ไปŠ'],\n}\n\n# Used to adjust the returned date before/after the source\nModifiers = {\n 'ๅ‰ใ€…': -2,\n 'ๅ…ˆใ€…': -2,\n 'ๅ…ˆ': -1,\n 'ๅ‰': -1,\n '้ŽๅŽป': -1,\n 'ๆฌกใฎ': 1,\n 'ๆฌก': 1,\n 'ๆ˜ŽๅพŒ': 2,\n 'ๅพŒ': 1,\n 'ๅ†ๆฅ': 2,\n 'ๆฅ': 1,\n}\n\ndayOffsets = {\n 'ๆ˜ŽๅพŒๆ—ฅ': 2,\n 'ใ‚ใ•ใฃใฆ': 2,\n 'ๆ˜Žๆ—ฅ': 1,\n 'ไปŠๆ—ฅ': 0,\n 'ๆ˜จๆ—ฅ': -1,\n 'ๅ‰ๆ—ฅ': -1,\n 'ไธ€ๆ˜จๆ—ฅ': -2,\n 'ใŠใจใจใ„': -2,\n}\n\n# special day and/or times, i.e. lunch, noon, evening\n# each element in the dictionary is a dictionary that is used\n# to fill in any value to be replace - the current date/time will\n# already have been populated by the method buildSources\nre_sources = {\n 'ๆœ': {'hr': 6, 'mn': 0, 'sec': 0},\n 'ๅˆๅ‰': {'hr': 9, 'mn': 0, 'sec': 0},\n 'ๆญฃๅˆ': {'hr': 12, 'mn': 0, 'sec': 0},\n 'ๅˆๅพŒ': {'hr': 13, 'mn': 0, 'sec': 0},\n 'ๆ˜ผ': {'hr': 13, 'mn': 0, 'sec': 0},\n 'ๅคœ': {'hr': 19, 'mn': 0, 'sec': 0},\n}\n\nsmall = {\n '้›ถ': 0,\n 'ไธ€': 1,\n 'ไบŒ': 2,\n 'ไธ‰': 3,\n 'ๅ››': 4,\n 'ไบ”': 5,\n 'ๅ…ญ': 6,\n 'ไธƒ': 7,\n 'ๅ…ซ': 8,\n 'ไน': 9,\n 'ๅ': 10,\n}\n\nmagnitude = {\n '็™พ': 100,\n 'ๅƒ': 1000,\n 'ไธ‡': 10000,\n 'ๅ„„': 100000000,\n 'ๅ…†': 1000000000000,\n 'ไบฌ': 10000000000000000,\n}\n\nignore = (',', 'ใฎ')\ncre_days = re.compile(rf'''\\b((?P\\d+)|(?P{'|'.join(numbers)}))\\s*ๆ—ฅ\\s*(?Pๅ‰|ๅพŒ)\\b''', re.IGNORECASE)\ncre_weeks = re.compile(rf'''\\b\\s*((?P{'|'.join(Modifiers)})|(?P{'|'.join(numbers)}))\\s*้€ฑใฎ?(?P{'|'.join(Weekdays)})\\b''', re.IGNORECASE)\ncre_date1 = re.compile(r'((?P(\\d{2})?(\\d{2}))-)?(?P1[0-2]|0?[1-9])-(?P3[01]|[12][0-9]|0?[1-9])')\ncre_date2 = re.compile(r'((?P(\\d{2})?(\\d{2}))\\/)?(?P1[0-2]|0?[1-9])\\/(?P3[01]|[12][0-9]|0?[1-9])')\ncre_date3 = re.compile(r'(((?P(\\d{2})?(\\d{2}))\\s*ๅนด)?\\s*(?P1[0-2]|0?[1-9])\\s*ๆœˆ)?\\s*(?P3[01]|[12][0-9]|0?[1-9])\\s*ๆ—ฅ')\n\n\ndef parseRegex(s, sourceTime):\n m = cre_days.match(s)\n if m:\n qty = m.group('qty')\n nums = m.group('nums')\n backforth = m.group('backforth') == 'ๅพŒ'\n\n if qty:\n n = int(qty)\n elif nums:\n n = numbers[nums]\n\n multiplier = 1 if backforth else -1\n day_delta = n * multiplier\n dt = datetime.datetime.fromtimestamp(time.mktime(sourceTime)) + datetime.timedelta(days=day_delta)\n sourceTime = dt.timetuple()\n return sourceTime, True\n\n m = cre_weeks.match(s)\n if m:\n modifiers = m.group('modifiers')\n nums = m.group('nums')\n weekday = m.group('weekday')\n\n if modifiers:\n n = Modifiers[modifiers]\n elif nums:\n n = numbers[nums]\n\n if weekday:\n w = -sourceTime.tm_wday + Weekdays.index(weekday)\n else:\n w = 0\n\n 
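# Net shift in days: n whole weeks plus the within-week offset w to the requested weekday.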
day_delta = n*7 + w\n dt = datetime.datetime.fromtimestamp(time.mktime(sourceTime)) + datetime.timedelta(days=day_delta)\n sourceTime = dt.timetuple()\n return sourceTime, True\n\n m = cre_date1.match(s)\n if not m:\n m = cre_date2.match(s)\n if not m:\n m = cre_date3.match(s)\n\n if m:\n year_str = m.group('year')\n month_str = m.group('month')\n day_str = m.group('day')\n\n year = int(year_str) if year_str else sourceTime.tm_year\n if year < 100:\n year += 2000\n month = int(month_str) if month_str else sourceTime.tm_mon\n day = int(day_str) if day_str else sourceTime.tm_mday\n\n return datetime.datetime(year, month, day).timetuple(), True\n\n return sourceTime, False\n", "sub_path": "parsedatetime/pdt_locales/ja_JP.py", "file_name": "ja_JP.py", "file_ext": "py", "file_size_in_byte": 5643, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "re.compile", "line_number": 162, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 162, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 163, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 163, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 164, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 165, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 166, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 183, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 183, "usage_type": "attribute"}, {"api_name": "time.mktime", "line_number": 183, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 183, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 204, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 204, "usage_type": "attribute"}, {"api_name": "time.mktime", "line_number": 204, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 204, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 225, "usage_type": "call"}]} {"seq_id": "579309004", "text": "# Copyright 2016-2020 Blue Marble Analytics LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nfrom builtins import str\nfrom collections import OrderedDict\nfrom importlib import import_module\nimport os.path\nimport sys\nimport unittest\n\nfrom tests.common_functions import create_abstract_model, add_components_and_load_data\n\nTEST_DATA_DIRECTORY = os.path.join(\n os.path.dirname(__file__), \"..\", \"..\", \"..\", \"test_data\"\n)\n\n# Import prerequisite modules\nPREREQUISITE_MODULE_NAMES = [\n \"temporal.operations.timepoints\",\n \"temporal.operations.horizons\",\n \"temporal.investment.periods\",\n \"geography.load_zones\",\n \"project\",\n]\nNAME_OF_MODULE_BEING_TESTED = \"project.capacity.capacity_types.gen_ret_bin\"\nIMPORTED_PREREQ_MODULES = list()\nfor mdl in PREREQUISITE_MODULE_NAMES:\n try:\n 
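# Each prerequisite module is imported dynamically by its dotted name under the gridpath package.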
imported_module = import_module(\".\" + str(mdl), package=\"gridpath\")\n IMPORTED_PREREQ_MODULES.append(imported_module)\n except ImportError:\n print(\"ERROR! Module \" + str(mdl) + \" not found.\")\n sys.exit(1)\n# Import the module we'll test\ntry:\n MODULE_BEING_TESTED = import_module(\n \".\" + NAME_OF_MODULE_BEING_TESTED, package=\"gridpath\"\n )\nexcept ImportError:\n print(\"ERROR! Couldn't import module \" + NAME_OF_MODULE_BEING_TESTED + \" to test.\")\n\n\nclass TestGenRetBin(unittest.TestCase):\n \"\"\" \"\"\"\n\n def test_add_model_components(self):\n \"\"\"\n Test that there are no errors when adding model components\n :return:\n \"\"\"\n create_abstract_model(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )\n\n def test_load_model_data(self):\n \"\"\"\n Test that data are loaded with no errors\n :return:\n \"\"\"\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )\n\n def test_data_loaded_correctly(self):\n \"\"\"\n Test that the data loaded are as expected\n :return:\n \"\"\"\n m, data = add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )\n instance = m.create_instance(data)\n\n # Set: GEN_RET_BIN_OPR_PRDS\n expected_gen_set = [(\"Clunky_Old_Gen2\", 2020), (\"Clunky_Old_Gen2\", 2030)]\n actual_gen_set = sorted([(g, p) for (g, p) in instance.GEN_RET_BIN_OPR_PRDS])\n self.assertListEqual(expected_gen_set, actual_gen_set)\n\n # Param: gen_ret_bin_capacity_mw\n expected_cap = {(\"Clunky_Old_Gen2\", 2020): 10, (\"Clunky_Old_Gen2\", 2030): 10}\n actual_cap = {\n (g, p): instance.gen_ret_bin_capacity_mw[g, p]\n for (g, p) in instance.GEN_RET_BIN_OPR_PRDS\n }\n self.assertDictEqual(expected_cap, actual_cap)\n\n # Set: GEN_RET_BIN\n expected_gens = [\"Clunky_Old_Gen2\"]\n actual_gens = [g for g in instance.GEN_RET_BIN]\n self.assertListEqual(expected_gens, actual_gens)\n\n # Set: OPR_PRDS_BY_GEN_RET_BIN\n expected_periods_by_generator = {\"Clunky_Old_Gen2\": [2020, 2030]}\n actual_periods_by_generator = {\n g: [p for p in instance.OPR_PRDS_BY_GEN_RET_BIN[g]]\n for g in instance.GEN_RET_BIN\n }\n self.assertDictEqual(expected_periods_by_generator, actual_periods_by_generator)\n\n # Param: gen_ret_bin_first_period\n expected_first_period = {\"Clunky_Old_Gen2\": 2020}\n actual_first_period = {\n g: instance.gen_ret_bin_first_period[g] for g in instance.GEN_RET_BIN\n }\n self.assertDictEqual(expected_first_period, actual_first_period)\n\n # Param: gen_ret_bin_capacity_mw\n expected_cap = {(\"Clunky_Old_Gen2\", 2020): 10, (\"Clunky_Old_Gen2\", 2030): 10}\n actual_cap = {\n (g, p): instance.gen_ret_bin_capacity_mw[g, p]\n for (g, p) in instance.GEN_RET_BIN_OPR_PRDS\n }\n self.assertDictEqual(expected_cap, actual_cap)\n\n # Param: gen_ret_bin_fixed_cost_per_mw_yr\n expected_cost = {\n (\"Clunky_Old_Gen2\", 2020): 1000,\n (\"Clunky_Old_Gen2\", 2030): 1000,\n }\n actual_cost = {\n (g, p): instance.gen_ret_bin_fixed_cost_per_mw_yr[g, p]\n for (g, p) in instance.GEN_RET_BIN_OPR_PRDS\n }\n self.assertDictEqual(expected_cost, actual_cost)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "sub_path": "tests/project/capacity/capacity_types/test_gen_ret_bin.py", "file_name": "test_gen_ret_bin.py", "file_ext": "py", 
"file_size_in_byte": 5277, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "os.path.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 26, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 27, "usage_type": "name"}, {"api_name": "importlib.import_module", "line_number": 42, "usage_type": "call"}, {"api_name": "builtins.str", "line_number": 42, "usage_type": "call"}, {"api_name": "builtins.str", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 46, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 49, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tests.common_functions.create_abstract_model", "line_number": 64, "usage_type": "call"}, {"api_name": "tests.common_functions.add_components_and_load_data", "line_number": 77, "usage_type": "call"}, {"api_name": "tests.common_functions.add_components_and_load_data", "line_number": 90, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 153, "usage_type": "call"}]} {"seq_id": "459640029", "text": "import logging\nimport json\nimport os\nimport signal\nimport sys\nimport urllib\nimport urlparse\n\nfrom multiprocessing.process import Process\n\nsys.path.insert(0, os.path.abspath(os.path.realpath(__file__) + '/../../../'))\n\nfrom oauth2 import Provider\nfrom oauth2.grant import ClientCredentialsGrant\nfrom oauth2.tokengenerator import Uuid4\nfrom oauth2.store.memory import ClientStore, TokenStore\nfrom oauth2.web.tornado import OAuth2Handler\nfrom tornado.ioloop import IOLoop\nfrom tornado.web import Application, url\nimport oauth2\nfrom tdaemon.handler import APIHandler\nfrom tdaemon.libs.utils import MongoDB\nimport tdaemon.libs.utils as utils\n\nclass Oauth2libHandler(APIHandler):\n def get(self):\n try:\n client_store = ClientStore()\n client_store.add_client(client_id=\"abc\", client_secret=\"xyz\",\n redirect_uris=[],\n authorized_grants=[oauth2.grant.ClientCredentialsGrant.grant_type])\n\n token_store = TokenStore()\n\n # Generator of tokens\n token_generator = oauth2.tokengenerator.Uuid4()\n token_generator.expires_in[oauth2.grant.ClientCredentialsGrant.grant_type] = 3600\n\n provider = Provider(access_token_store=token_store,\n auth_code_store=token_store, client_store=client_store,\n token_generator=token_generator)\n # provider.add_grant(AuthorizationCodeGrant(site_adapter=TestSiteAdapter()))\n provider.add_grant(ClientCredentialsGrant())\n except Exception as e:\n result = {\"success\":0,\"return_code\":unicode(e),\"error_msg\":utils.format_error()}\n\n self.finish(result)\n\ndef run_auth_server():\n client_store = ClientStore()\n client_store.add_client(client_id=\"abc\", client_secret=\"xyz\",\n redirect_uris=[],\n authorized_grants=[oauth2.grant.ClientCredentialsGrant.grant_type])\n\n token_store = TokenStore()\n\n # Generator of tokens\n token_generator = oauth2.tokengenerator.Uuid4()\n token_generator.expires_in[oauth2.grant.ClientCredentialsGrant.grant_type] = 3600\n\n provider = Provider(access_token_store=token_store,\n auth_code_store=token_store, client_store=client_store,\n token_generator=token_generator)\n # 
provider.add_grant(AuthorizationCodeGrant(site_adapter=TestSiteAdapter()))\n provider.add_grant(ClientCredentialsGrant())\n\n try:\n app = Application([\n url(provider.authorize_path, OAuth2Handler, dict(provider=provider)),\n url(provider.token_path, OAuth2Handler, dict(provider=provider)),\n ])\n\n app.listen(8080)\n print(\"Starting OAuth2 server on http://localhost:8080/...\")\n IOLoop.current().start()\n\n except KeyboardInterrupt:\n IOLoop.close()\n\n\ndef main():\n auth_server = Process(target=run_auth_server)\n auth_server.start()\n app_server = Process(target=run_app_server)\n app_server.start()\n print(\"Access http://localhost:8081/app in your browser\")\n\n def sigint_handler(signal, frame):\n print(\"Terminating servers...\")\n auth_server.terminate()\n auth_server.join()\n app_server.terminate()\n app_server.join()\n\n signal.signal(signal.SIGINT, sigint_handler)\n\nif __name__ == \"__main__\":\n main()", "sub_path": "dxb/libs/oauthlib.py", "file_name": "oauthlib.py", "file_ext": "py", "file_size_in_byte": 3407, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "sys.path.insert", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 11, "usage_type": "call"}, {"api_name": "tdaemon.handler.APIHandler", "line_number": 25, "usage_type": "name"}, {"api_name": "oauth2.store.memory.ClientStore", "line_number": 28, "usage_type": "call"}, {"api_name": "oauth2.grant", "line_number": 31, "usage_type": "attribute"}, {"api_name": "oauth2.store.memory.TokenStore", "line_number": 33, "usage_type": "call"}, {"api_name": "oauth2.tokengenerator.Uuid4", "line_number": 36, "usage_type": "call"}, {"api_name": "oauth2.tokengenerator", "line_number": 36, "usage_type": "attribute"}, {"api_name": "oauth2.grant", "line_number": 37, "usage_type": "attribute"}, {"api_name": "oauth2.Provider", "line_number": 39, "usage_type": "call"}, {"api_name": "oauth2.grant.ClientCredentialsGrant", "line_number": 43, "usage_type": "call"}, {"api_name": "tdaemon.libs.utils.format_error", "line_number": 45, "usage_type": "call"}, {"api_name": "tdaemon.libs.utils", "line_number": 45, "usage_type": "name"}, {"api_name": "oauth2.store.memory.ClientStore", "line_number": 50, "usage_type": "call"}, {"api_name": "oauth2.grant", "line_number": 53, "usage_type": "attribute"}, {"api_name": "oauth2.store.memory.TokenStore", "line_number": 55, "usage_type": "call"}, {"api_name": "oauth2.tokengenerator.Uuid4", "line_number": 58, "usage_type": "call"}, {"api_name": "oauth2.tokengenerator", "line_number": 58, "usage_type": "attribute"}, {"api_name": "oauth2.grant", "line_number": 59, "usage_type": "attribute"}, {"api_name": "oauth2.Provider", "line_number": 61, "usage_type": "call"}, {"api_name": "oauth2.grant.ClientCredentialsGrant", "line_number": 65, "usage_type": "call"}, {"api_name": "tornado.web.Application", "line_number": 68, "usage_type": "call"}, {"api_name": "tornado.web.url", "line_number": 69, "usage_type": "call"}, {"api_name": "oauth2.web.tornado.OAuth2Handler", "line_number": 69, "usage_type": "argument"}, {"api_name": "tornado.web.url", "line_number": 70, "usage_type": "call"}, {"api_name": "oauth2.web.tornado.OAuth2Handler", "line_number": 70, "usage_type": "argument"}, {"api_name": 
"tornado.ioloop.IOLoop.current", "line_number": 75, "usage_type": "call"}, {"api_name": "tornado.ioloop.IOLoop", "line_number": 75, "usage_type": "name"}, {"api_name": "tornado.ioloop.IOLoop.close", "line_number": 78, "usage_type": "call"}, {"api_name": "tornado.ioloop.IOLoop", "line_number": 78, "usage_type": "name"}, {"api_name": "multiprocessing.process.Process", "line_number": 82, "usage_type": "call"}, {"api_name": "multiprocessing.process.Process", "line_number": 84, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 95, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 95, "usage_type": "attribute"}]} {"seq_id": "333947638", "text": "# coding: utf-8\nimport enum\nfrom core.network.timeout import Timeout\n\n\nclass EntityTypes(enum.Enum):\n LOADCASE = \"loadcase\"\n SIMULATION = \"simulation\"\n TASK = \"task\"\n STYPE = \"submodelType\"\n SUBMODEL = \"submodel\"\n\n\nclass AbstractEntity(object):\n \"\"\"\n Class containing common behaviour of main CML-Bench entities, such as loadcases, simulations, tasks\n \"\"\"\n def __init__(self, app_session, identifier):\n self._app_session = app_session\n self._identifier = identifier\n self._http_session = self._app_session.session\n self._sender = self._app_session.sender\n self._handler = self._app_session.handler\n\n self._entity_type = None\n self._name = None\n self._parent_id = None\n self._tree_path = None\n self._tree_id = None\n\n def __repr__(self):\n return \"Entity type: {} | Entity ID: {}\".format(self.entity_type, self.identifier)\n\n @property\n def entity_type(self):\n return self._entity_type\n\n @property\n def identifier(self):\n return self._identifier\n\n @property\n def name(self):\n return self._name\n\n @property\n def parent_id(self):\n return self._parent_id\n\n @property\n def tree_path(self):\n return self._tree_path\n\n @property\n def tree_id(self):\n return self._tree_id\n\n def _set_entity_type(self, entity_type):\n if isinstance(entity_type, EntityTypes):\n self._entity_type = entity_type\n\n def _setup_attributes(self):\n if self.entity_type:\n response = self._sender.send_entity_base_info_request(self.identifier, self.entity_type.value)\n Timeout.hold_your_horses()\n self._handler.set_response(response)\n base_info = self._handler.handle_response_to_entity_base_info_request()\n self._name = base_info.get(\"name\")\n self._parent_id = base_info.get(\"parent_id\")\n self._tree_path = base_info.get(\"tree_path\")\n self._tree_id = base_info.get(\"tree_id\")\n\n\nclass Loadcase(AbstractEntity):\n \"\"\"\n Class for representation of the loadcase entity\n \"\"\"\n def __init__(self, app_session, identifier):\n super().__init__(app_session, identifier)\n self._set_entity_type(EntityTypes.LOADCASE)\n self._setup_attributes()\n\n def get_list_of_simulations(self):\n \"\"\"\n Method for getting a list of simulations, belonging to the loadcase\n :return: list of simulation objects, or None if some error occurred during reading simulations\n \"\"\"\n response = self._sender.send_loadcase_simulations_request(self.identifier)\n Timeout.hold_your_horses()\n self._handler.set_response(response)\n simulation_ids_list = self._handler.handle_response_to_loadcase_simulations_request()\n if simulation_ids_list:\n simulations = []\n for simulation_id in simulation_ids_list:\n simulations.append(Simulation(self._app_session, simulation_id))\n return simulations\n return None\n\n\nclass Simulation(AbstractEntity):\n \"\"\"\n Class for representation of the simulation entity\n \"\"\"\n def 
__init__(self, app_session, identifier):\n super().__init__(app_session, identifier)\n self._set_entity_type(EntityTypes.SIMULATION)\n self._setup_attributes()\n\n def clone(self):\n \"\"\"\n Method for creating a new simulation, based on the current one\n :return: id of the new simulation, or None if failed to clone simulation\n \"\"\"\n response = self._sender.send_clone_simulation_request(self.identifier)\n Timeout.hold_your_horses()\n self._handler.set_response(response)\n return self._handler.handle_response_to_clone_simulation_request()\n\n def get_parent_loadcase(self):\n \"\"\"\n Method for getting a parent loadcase\n :return: loadcase object\n \"\"\"\n return Loadcase(self._app_session, self.parent_id)\n\n def get_list_of_tasks(self):\n \"\"\"\n Method for getting a list of tasks, belonging to the simulation\n :return: list of task objects, of None if some error occurred during reading tasks\n \"\"\"\n response = self._sender.send_simulation_tasks_request(self.identifier)\n Timeout.hold_your_horses()\n self._handler.set_response(response)\n simulation_tasks_list = self._handler.handle_response_to_simulation_tasks_request()\n if simulation_tasks_list:\n tasks = []\n for task_id in simulation_tasks_list:\n tasks.append(Task(self._app_session, task_id))\n return tasks\n return None\n\n def get_list_of_submodels(self):\n response = self._sender.send_simulation_submodels_request(self.identifier)\n Timeout.hold_your_horses()\n self._handler.set_response(response)\n simulation_submodels_list = self._handler.handle_response_to_simulation_submodels_request()\n if simulation_submodels_list:\n submodels = []\n for submodel_id in simulation_submodels_list:\n submodels.append(Submodel(self._app_session, submodel_id))\n return submodels\n return None\n\n def add_new_sumbodels(self, new_submodel_ids):\n # First, get list of current submodels\n response = self._sender.send_simulation_submodels_request(self.identifier)\n Timeout.hold_your_horses()\n self._handler.set_response(response)\n simulation_submodels_list = self._handler.handle_response_to_simulation_submodels_request()\n if not simulation_submodels_list:\n simulation_submodels_list = []\n # Append new submodel to the list of existing submodels\n simulation_submodels_list.extend(new_submodel_ids)\n # Send request to update simulation submodels (that's how it works in CML-Bench)\n response = self._sender.send_simulation_submodels_update_request(self.identifier, simulation_submodels_list)\n Timeout.hold_your_horses()\n self._handler.set_response(response)\n simulation_submodels_list = self._handler.handle_response_to_simulation_submodels_request()\n if simulation_submodels_list:\n submodels = []\n for submodel_id in simulation_submodels_list:\n submodels.append(Submodel(self._app_session, submodel_id))\n return submodels\n return None\n\n\nclass Task(AbstractEntity):\n \"\"\"\n Class for representation of the task entity\n \"\"\"\n def __init__(self, app_session, identifier):\n super().__init__(app_session, identifier)\n self._set_entity_type(EntityTypes.TASK)\n self._setup_attributes()\n\n def get_status(self):\n response = self._sender.send_task_status_request(self.identifier)\n Timeout.hold_your_horses()\n self._handler.set_response(response)\n return self._handler.handle_response_to_task_status_response()\n\n\nclass SubmodelType(AbstractEntity):\n \"\"\"\n Class for representation of the s|type entity\n \"\"\"\n def __init__(self, app_session, identifier):\n super().__init__(app_session, identifier)\n 
self._set_entity_type(EntityTypes.STYPE)\n self._setup_attributes()\n\n def get_list_of_submodels(self):\n response = self._sender.send_stype_submodels_request(self.tree_path)\n Timeout.hold_your_horses()\n self._handler.set_response(response)\n stype_submodels_list = self._handler.handle_response_to_stype_submodels_requests()\n if stype_submodels_list:\n submodels = []\n for submodel_id in stype_submodels_list:\n submodels.append(Submodel(self._app_session, submodel_id))\n return submodels\n return None\n\n def upload_new_submodel(self, *files, **params):\n if \"stype\" in params.keys():\n stype = SubmodelType(self._app_session, params.get(\"stype\"))\n else:\n stype = SubmodelType(self._app_session, self._app_session.cfg.server_storage)\n\n if \"add_to_clipboard\" in params.keys():\n add_to_clipboard = \"on\" if bool(params.get(\"add_to_clipboard\")) else \"off\"\n else:\n add_to_clipboard = \"off\"\n\n submodels = []\n for file in files:\n response = self._sender.send_upload_submodel_request(file, stype.tree_id, add_to_clipboard)\n Timeout.hold_your_horses()\n self._handler.set_response(response)\n sumbodel_id = self._handler.handle_response_to_upload_submodel_request()\n if sumbodel_id:\n submodels.append(Submodel(self._app_session, sumbodel_id))\n\n return submodels\n\n\nclass Submodel(AbstractEntity):\n \"\"\"\n Class for representation of the submodel entity\n \"\"\"\n def __init__(self, app_session, identifier):\n super().__init__(app_session, identifier)\n self._set_entity_type(EntityTypes.SUBMODEL)\n self._setup_attributes()\n", "sub_path": "src/core/bench/entities.py", "file_name": "entities.py", "file_ext": "py", "file_size_in_byte": 8964, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "enum.Enum", "line_number": 6, "usage_type": "attribute"}, {"api_name": "core.network.timeout.Timeout.hold_your_horses", "line_number": 65, "usage_type": "call"}, {"api_name": "core.network.timeout.Timeout", "line_number": 65, "usage_type": "name"}, {"api_name": "core.network.timeout.Timeout.hold_your_horses", "line_number": 89, "usage_type": "call"}, {"api_name": "core.network.timeout.Timeout", "line_number": 89, "usage_type": "name"}, {"api_name": "core.network.timeout.Timeout.hold_your_horses", "line_number": 115, "usage_type": "call"}, {"api_name": "core.network.timeout.Timeout", "line_number": 115, "usage_type": "name"}, {"api_name": "core.network.timeout.Timeout.hold_your_horses", "line_number": 132, "usage_type": "call"}, {"api_name": "core.network.timeout.Timeout", "line_number": 132, "usage_type": "name"}, {"api_name": "core.network.timeout.Timeout.hold_your_horses", "line_number": 144, "usage_type": "call"}, {"api_name": "core.network.timeout.Timeout", "line_number": 144, "usage_type": "name"}, {"api_name": "core.network.timeout.Timeout.hold_your_horses", "line_number": 157, "usage_type": "call"}, {"api_name": "core.network.timeout.Timeout", "line_number": 157, "usage_type": "name"}, {"api_name": "core.network.timeout.Timeout.hold_your_horses", "line_number": 166, "usage_type": "call"}, {"api_name": "core.network.timeout.Timeout", "line_number": 166, "usage_type": "name"}, {"api_name": "core.network.timeout.Timeout.hold_your_horses", "line_number": 188, "usage_type": "call"}, {"api_name": "core.network.timeout.Timeout", "line_number": 188, "usage_type": "name"}, {"api_name": "core.network.timeout.Timeout.hold_your_horses", "line_number": 204, "usage_type": "call"}, {"api_name": "core.network.timeout.Timeout", 
"line_number": 204, "usage_type": "name"}, {"api_name": "core.network.timeout.Timeout.hold_your_horses", "line_number": 228, "usage_type": "call"}, {"api_name": "core.network.timeout.Timeout", "line_number": 228, "usage_type": "name"}]} {"seq_id": "518062085", "text": "# To run this, you can install BeautifulSoup\n# https://pypi.python.org/pypi/beautifulsoup4\n\n# Or download the file\n# http://www.py4e.com/code3/bs4.zip\n# and unzip it in the same directory as this file\n\nimport urllib.request, urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = input('Enter - ')\n# url='http://py4e-data.dr-chuck.net/known_by_Fikret.html'\n# position = 2\nposition = input('Enter position:')\nposition = int(position) - 1\n# repeat = 4\nrepeat = input('Enter Count:')\nrepeat = int(repeat)\n\n# Retrieve all of the anchor tags\ndef anchor():\n html = urllib.request.urlopen(url, context=ctx).read()\n soup = BeautifulSoup(html, 'html.parser')\n links=list()\n tags = soup('a')\n for tag in tags:\n # print(tag)\n tag=(tag.get('href', None))\n # print(tag)\n links.append(tag)\n #print(links)\n link=links[position]\n #print('New URL', link)\n return link\n\nwhile repeat > 0:\n nurl = anchor()\n #print(nurl)\n url=nurl\n print(url)\n repeat=repeat - 1\n", "sub_path": "pawd/code3/urllinks.py", "file_name": "urllinks.py", "file_ext": "py", "file_size_in_byte": 1159, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "ssl.create_default_context", "line_number": 13, "usage_type": "call"}, {"api_name": "ssl.CERT_NONE", "line_number": 15, "usage_type": "attribute"}, {"api_name": "urllib.request.request.urlopen", "line_number": 28, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 28, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 28, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 29, "usage_type": "call"}]} {"seq_id": "591284969", "text": "#!/usr/bin/env python3\r\n'''\r\nwebapp.py sends queries from the frontend to the backend.\r\nIt loads and updates pages and processes data in a form easy\r\nfor the html to present.\r\n'''\r\n\r\nimport flask\r\nfrom flask import render_template, request\r\nimport json\r\nimport sys\r\nfrom datasource import *\r\nimport psycopg2\r\n\r\napp = flask.Flask(__name__)\r\nconnection = psycopg2.connect(database=\"huhe\", user=\"huhe\", password=\"tree695eye\")\r\ndataSource = DataSource(connection)\r\n\r\n\r\ndef getStateQueryData(startYear, endYear, state):\r\n\t'''\r\n\tReturns the average annual rate of homicide in a state (per 100,000 people),\r\n\tthe national average annual rate of homicide (per 100,000 people),\r\n\ta list of rates of homicide over each specified year within the state,\r\n\ta list containing each year in the specified range (for our Javascript file), and the\r\n\tcauses of homicide along with the percentage of total homicides they\r\n\tcontributed, if accurate data for each said cause is provided.\r\n\r\n\tPARAMETERS:\r\n\t\tstartYear - the first year of data to draw from\r\n\t\tendYear - the last year of data to draw from\r\n\t\tstate - the name of the state to draw data from\r\n\r\n\tRETURN:\r\n\t\tA dictionary containing the average annual rate homicide in the nation and\r\n\t\tstate, a list of annual rates of homicide in the state,\r\n\t\ta list of the years 
in the specified range, and another dictionary storing each cause and the percentage of\r\n\t\thomicides it was responsible for\r\n\r\n\tCalls getStateCrudeRate, getCausesAndPercentages, getStateSingleYearCrudeRates, getYearRange,\r\n\tand getNationalCrudeRate\r\n\t'''\r\n\tdataTable = {}\r\n\tfullList = dataSource.getStateQuery(startYear, endYear, state)\r\n\r\n\tif isinstance(fullList, Exception):\r\n\t\traise fullList\r\n\r\n\tdataTable[\"yearRange\"] = getYearRange(startYear, endYear)\r\n\tdataTable[\"singleYearCrudeRates\"] = getStateSingleYearCrudeRates(startYear, endYear, state)\r\n\r\n\tdataTable[\"stateCrudeRate\"] = getStateCrudeRate(fullList)\r\n\tdataTable[\"causesAndPercentages\"] = getCausesAndPercentages(fullList)\r\n\r\n\tnationTotals = dataSource.getUSATotals(startYear, endYear)\r\n\tdataTable[\"nationalCrudeRate\"] = getNationalCrudeRate(nationTotals)\r\n\r\n\treturn dataTable\r\n\r\n\r\ndef getStateSingleYearCrudeRates(startYear, endYear, state):\r\n\t'''\r\n\tGets the rate of homicide within the specified state over each year from startYear to endYear,\r\n\tplaces all of these crude rates into a list of ints and returns this list\r\n\r\n PARAMETERS:\r\n startYear: the first year to find the homicide crude rate for\r\n endYear: the last year to find the homicide crude rate for\r\n state: the state to find the homicide crude rate for\r\n\r\n RETURN:\r\n A list of ints each representing the rate of homicide per 100,000 people in\r\n\teach year within the specified range\r\n\r\n Calls getStateCrudeRate\r\n '''\r\n\tlist = []\r\n\trate = 0\r\n\tcrudeRates = []\r\n\r\n\tfor year in range (startYear, endYear + 1):\r\n\t\tlist = dataSource.getStateQuery(year, year, state)\r\n\t\trate = getStateCrudeRate(list)\r\n\t\tcrudeRates.append(rate)\r\n\r\n\treturn crudeRates\r\n\r\n\r\ndef getStateCrudeRate(list):\r\n\t'''\r\n\tReturns the average annual rate of homicide in a state (per 100,000 people) over the\r\n\tspecified year range. 
If no data was given over this year range (no population of deaths),\r\n\twe return 0.\r\n\r\n\tPARAMETERS:\r\n\t\tlist - an array of state homicide data for each year in the range the user queried\r\n\r\n\tRETURN:\r\n\t\tA int representing the average annual number of homicides in the user's\r\n\t\trequested state (per 100,000) rounded to 3 decimal places or 0 if no valid data was given\r\n\r\n\tCalls getAverageStateDeaths, getAverageStatePopulation\r\n\t'''\r\n\taverageDeaths = getAverageStateDeaths(list)\r\n\taveragePopulation = getAverageStatePopulation(list)\r\n\tif(averagePopulation == 0):\r\n\t\treturn 0\r\n\t\r\n\treturn round(averageDeaths*100000/averagePopulation, 3)\r\n\r\n\r\ndef getAverageStateDeaths(list):\r\n\t'''\r\n\tReturns the average annual number of homicides in a state (per 100,000 people)\r\n\r\n\tPARAMETERS:\r\n\t\tlist - an array of state homicide data for each year in the range the user queried\r\n\r\n\tRETURN:\r\n\t\tThe average annual number of homicides in the user's requested state (per 100,000)\r\n\r\n\tCalls getAverageStateDeaths, getAverageStatePopulation\r\n\t'''\r\n\ttupleIndex = 0;\r\n\tstateTotal = 0;\r\n\tnumYears = len(list)\r\n\r\n\tfor year in list:\r\n\t\ttupleIndex = len(year) - 2\r\n\t\tif(tupleIndex > 0):\r\n\t\t\tstateTotal += year[tupleIndex][5]\r\n\r\n\treturn stateTotal/numYears\r\n\r\n\r\ndef getAverageStatePopulation(list):\r\n\t'''\r\n\tReturns the average annual population of the state over user's queried year range\r\n\r\n\tPARAMETERS:\r\n\t\tlist - an array of state homicide data for each year in the range the user queried\r\n\r\n\tRETURN:\r\n\t\tThe average annual population of the user's specified state over the user's\r\n\t\tspecified year range\r\n\t'''\r\n\tnumYears = len(list)\r\n\ttotal = 0\r\n\r\n\tfor year in list:\r\n\t\tif(len(year) > 1):\r\n\t\t\ttotal += year[0][6]\r\n\r\n\treturn total/numYears\r\n\r\n\r\ndef getYearRange(startYear, endYear):\r\n\t'''\r\n\treturns a list containing each year (as an int) from startYear to endYear\r\n\r\n\tPARAMETERS:\r\n\tstartYear: the first year to store in the list\r\n\tendYear: the last year to store in the list\r\n\r\n\tRETURN:\r\n\tA list of ints, starting from startYear and increasing sequentially\r\n\tup to and including endYear\r\n\t'''\r\n\tlist = []\r\n\r\n\tfor year in range(startYear, endYear + 1):\r\n\t\tlist.append(year)\r\n\r\n\treturn list\r\n\r\n\r\ndef getNationalCrudeRate(list):\r\n\t'''\r\n\tReturns the national average annual rate of homicide per 100,000 people\r\n\r\n\tPARAMETERS:\r\n\t\tlist - an array of state homicide data for each year in the range the user queried\r\n\r\n\tRETURN:\r\n\t\tThe national average annual rate of homicide per 100,000 people over the\r\n\t\tyear range the user queried for\r\n\r\n\tCalls getNationalAverageDeaths and getAverageNationalPopulation\r\n\t'''\r\n\taverageDeaths = getNationalAverageDeaths(list)\r\n\taveragePopulation = getAverageNationalPopulation(list)\r\n\r\n\treturn round(averageDeaths*100000/averagePopulation, 3)\r\n\r\n\r\ndef getNationalAverageDeaths(list):\r\n\t'''\r\n\tReturns the average annual number of homicides across the nation\r\n\r\n\tPARAMETERS:\r\n\t\tlist - an array of state homicide data for each year in the range the user queried\r\n\r\n\tRETURN:\r\n\t\tThe national average annual number of homicides\r\n\t'''\r\n\ttotal = 0\r\n\ttupleIndex = 0\r\n\tnumYears = len(list)\r\n\r\n\tfor year in list:\r\n\t\ttupleIndex = len(year) - 1\r\n\t\t\r\n\t\tif(tupleIndex > 0):\r\n\t\t\ttotal += 
year[tupleIndex][5]\r\n\r\n\treturn total/numYears\r\n\r\n\r\ndef getAverageNationalPopulation(list):\r\n\t'''\r\n\tReturns the nation's average population over the user's specified year range\r\n\r\n\tPARAMETERS:\r\n\t\tlist - an array of state homicide data for each year in the range the user queried\r\n\r\n\tRETURN:\r\n\t\tThe national average population over the specified year range\r\n\t'''\r\n\tnumYears = len(list)\r\n\ttotal = 0\r\n\ttupleIndex = 0\r\n\r\n\tfor year in list:\r\n\t\ttupleIndex = len(year) - 1\r\n\r\n\t\tif(tupleIndex > 0):\r\n\t\t\ttotal += year[tupleIndex][6]\r\n\r\n\r\n\treturn total/numYears\r\n\r\n\r\ndef getCausesAndPercentages(list):\r\n\t'''\r\n\tReturns a dictionary with each key being a cause of homicide and each value being the\r\n\tpercentage of homicides the associated cause was responsible for\r\n\r\n\tPARAMETERS:\r\n\t\tlist - an array of state homicide data for each year in the range the user queried\r\n\r\n\tRETURN:\r\n\t\tA dictionary with each key being a cause of homicide and each value being the\r\n\t\tpercentage of homicides the associated cause was responsible for\r\n\r\n\tCalls isValidCause, getPercent, and getPercentOther\r\n\t'''\r\n\tlastIndex = len(list[0]) - 3\r\n\tcausesList = {}\r\n\r\n\tfor index in range(lastIndex):\r\n\t\tcause = list[0][index][3]\r\n\t\tif(isValidCause(cause, list)):\r\n\t\t\tcausesList[cause] = getPercent(cause, list)\r\n\r\n\tcausesList[\"Other\"] = getPercentOther(causesList, list)\r\n\r\n\treturn causesList\r\n\r\n\r\ndef isValidCause(cause, list):\r\n\t'''\r\n\tDetermines whether the inputted cause has valid data. More specifically, this method\r\n\tchecks whether the data for this cause was omitted in any of the specified years\r\n\tand does not regard it as valid in this case.\r\n\r\n\tPARAMETERS:\r\n\t\tlist - an array of state homicide data for each year in the range the user queried\r\n\r\n\tRETURN:\r\n\t\tA True value if there was data for this cause every year and a False value otherwise\r\n\t'''\r\n\tfoundAllYears = True\r\n\r\n\tfor year in list:\r\n\t\tfoundThisYear = False\r\n\t\tlastIndex = len(year) - 3\r\n\r\n\t\tfor index in range(lastIndex):\r\n\t\t\tif(year[index][3] == cause):\r\n\t\t\t\tfoundThisYear = True\r\n\r\n\t\tfoundAllYears = foundAllYears and foundThisYear\r\n\r\n\treturn foundAllYears\r\n\r\n\r\ndef getPercent(cause, list):\r\n\t'''\r\n\tReturns the percentage of total homicides the specified cause of homicide was responsible\r\n\tfor\r\n\r\n\tPARAMETERS:\r\n\t\tlist - an array of state homicide data for each year in the range the user queried\r\n\r\n\tRETURN:\r\n\t\tA String representing a number with at most 3 decimal places representing the percentage\r\n\t\tof deaths the specified cause was responsible for\r\n\t'''\r\n\ttotalDeathsByCause = getTotalDeathsByCause(cause, list)\r\n\tnumberOfYears = len(list)\r\n\ttotalDeaths = getAverageStateDeaths(list)*numberOfYears\r\n\r\n\treturn round(totalDeathsByCause * 100/totalDeaths, 3)\r\n\r\n\r\ndef getTotalDeathsByCause(cause, list):\r\n\t'''\r\n\tReturns the total number of deaths the specified cause was responsible for\r\n\tover the user's queried year range in the specified state\r\n\r\n\tPARAMETERS:\r\n\t\tlist - an array of state homicide data for each year in the range the user queried\r\n\r\n\tRETURN:\r\n\t\tAn integer representing the total number of homicides the specified cause contributed\r\n\t'''\r\n\ttotalDeaths = 0\r\n\r\n\tfor year in list:\r\n\t\tlastIndex = len(year) - 3\r\n\r\n\t\tfor index in 
range(lastIndex):\r\n\t\t\tif(year[index][3] == cause):\r\n\t\t\t\ttotalDeaths += year[index][5]\r\n\r\n\treturn totalDeaths\r\n\r\n\r\ndef getPercentOther(causesList, list):\r\n\t'''\r\n\tReturns the percentage of homicides over the user's queried year range and specified state\r\n\tnot caused by any of the valid causes already found\r\n\r\n\tPARAMETERS:\r\n\t\tlist - an array of state homicide data for each year in the range the user queried\r\n\r\n\tRETURN:\r\n\t\tA String representation of a float rounded to 3 decimal places representing the\r\n\t\tpercentage of homicides not caused by any of the specified causes\r\n\t'''\r\n\tpercentageKnown = 0\r\n\r\n\tfor cause in causesList:\r\n\t\tpercentageKnown += causesList[cause]\r\n\r\n\treturn round(100 - percentageKnown, 3)\r\n\r\n\r\ndef adjustYears(startYear, endYear):\r\n\t'''\r\n\tAdjusts the start and end years to be the same year if only one is specified\r\n\tand sets the start to 1999 and end to 2017 if neither is specified.\r\n\r\n\tPARAMETERS:\r\n\t\tstartYear- the start year specified by the user\r\n\t\tendYear- the ending year specified by the user\r\n\r\n\tRETURN:\r\n\t\tThe start year and end year returned as Strings\r\n\t'''\r\n\tif(startYear is None):\r\n\t\tstartYear = \"\"\r\n\r\n\tif(endYear is None):\r\n\t\tendYear = \"\"\r\n\r\n\tif(startYear == \"\" and endYear == \"\"):\r\n\t\tstartYear = \"1999\"\r\n\t\tendYear = \"2017\"\r\n\r\n\telif(startYear == \"\"):\r\n\t\tstartYear = endYear\r\n\r\n\telif(endYear == \"\"):\r\n\t\tendYear = startYear\r\n\r\n\treturn startYear, endYear\r\n\r\n\r\ndef setYearsToInts(startYear, endYear):\r\n\t'''\r\n\tConverts the inputted start year and end year to ints.\r\n\r\n\tPARAMETERS:\r\n\t\tstartYear- the starting year for the query passed as a String\r\n\t\tendYear- the ending year for the query passed as a String\r\n\r\n\tRETURN:\r\n\t\tthe start year String converted into an int and the end year String\r\n\t\tconverted into an int\r\n\t'''\r\n\r\n\tstartYear = int(startYear)\r\n\tendYear = int(endYear)\r\n\r\n\treturn startYear, endYear\r\n\r\n\r\ndef cleanStateInput(state):\r\n\t'''\r\n\tRe-formats the inputted state to be usable in a SQL query. 
More specifically, this function\r\n\treturns a String with the leading and trailing white space of the input removed and each word\r\n\twithin the string (except conjunctions/prepositions like \"of\" or \"and\") capitalized.\r\n\tIf no string was specified, we simply return \"Alabama\"\r\n\r\n\tPARAMETERS:\r\n\t\tstate: the user inputted string representing the state they want to query over\r\n\r\n\tRETURN:\r\n\t\ta String representing the user's inputted state, but with the first letter of\r\n\t\teach word capitalized (except for prepositions and conjunctions) or Alabama if\r\n\t\tno String was entered.\r\n\t'''\r\n\tstate = state.strip()\r\n\r\n\tif state == \"\":\r\n\t\tstate = \"Alabama\"\r\n\r\n\tcorrectedState = \"\"\r\n\twordList = state.split(\" \")\r\n\r\n\tfor word in wordList:\r\n\t\tcorrectedWord = cleanIndividualWord(word)\r\n\t\tcorrectedState = correctedState + correctedWord + \" \"\r\n\r\n\tcorrectedState = correctedState.strip()\r\n\r\n\treturn correctedState\r\n\r\n\r\ndef cleanIndividualWord(word):\r\n\t'''\r\n\tReturns the inputted word with the first letter capitalized unless\r\n\tthe inputted word is a preposition or conjunction.\r\n\r\n\tPARAMETERS:\r\n\t\tword- the word to capitalize (or not capitalize)\r\n\r\n\tRETURN:\r\n\t\ta String representing the word, but capitalized if necessary\r\n\t'''\r\n\tnonCapitalizedWords = [\"a\", \"an\", \"for\", \"and\", \"or\", \"nor\", \"but\", \"yet\", \"so\", \"at\",\r\n\t \"around\", \"by\", \"after\", \"along\", \"from\", \"of\", \"on\", \"to\", \"with\", \"without\"]\r\n\tword = word.lower()\r\n\tif word not in nonCapitalizedWords:\r\n\t\tword = word[0].capitalize() + word[1:]\r\n\r\n\treturn word\r\n\r\n\r\ndef getNationalQueryData(startYear, endYear):\r\n\t'''\r\n\tReturns the average annual rate of homicide per 100,000 people across the nation over the specified\r\n\tyear range, the state/region with the highest average annual rate of homicide over this range and\r\n\tits rate of homicide, a list containing each year within the specified range (for our Javascript files),\r\n\tand the rate of homicide in each individual year in the specified range stored in a list.\r\n\r\n\tPARAMETERS:\r\n\t\tstartYear- the first year to collect national data for\r\n\t\tendYear- the last year over which we will collect data\r\n\r\n\tRETURN:\r\n\t\tA dictionary with keys: \"nationalCrudeRate\" - the national rate of homicide per 100,000 people\r\n\t\tover the specified years, \"mostDangerousState\" - the state with the highest homicide rate,\r\n\t\t\"mostDangerousStateRate\" - the rate of homicide of the most dangerous state,\r\n\t\t\"yearRange\" - a list of years, beginning with the start year and ending with the end year, and\r\n\t\t\"singleYearCrudeRates\" - the national rate of homicide each year in the range, stored in a list\r\n\t'''\r\n\tnationalQueryData = {}\r\n\tnationTotals = dataSource.getUSATotals(startYear, endYear)\r\n\tif isinstance(nationTotals, Exception):\r\n\t\traise nationTotals\r\n\r\n\tnationalQueryData[\"nationalCrudeRate\"] = getNationalCrudeRate(nationTotals)\r\n\tnationalQueryData[\"mostDangerousState\"], nationalQueryData[\"mostDangerousStateRate\"] = getMostDangerousStateAndData(startYear, endYear)\r\n\tnationalQueryData[\"yearRange\"] = getYearRange(startYear, endYear)\r\n\tnationalQueryData[\"singleYearCrudeRates\"] = getNationalSingleYearCrudeRates(startYear, endYear)\r\n\r\n\treturn nationalQueryData\r\n\r\n\r\ndef getMostDangerousStateAndData(startYear, endYear):\r\n\t'''\r\n\tReturns the US state with the 
highest rate of homicide over the specified\r\n\trange of years and the rate of homicide within this state per 100,000 people in the\r\n\tpopulation\r\n\r\n\tPARAMETERS:\r\n\t\tstartYear - the first year over which to find homicide data\r\n\t\tendYear- the last year to collect homicide data over\r\n\r\n\tRETURN:\r\n\t\tThe most dangerous state, returned as a string, and the state's\r\n\t\taverage annual rate of homicide over the specified range (per 100,000 people)\r\n\t\treturned as an int\r\n\t'''\r\n\tcrudeRate = 0\r\n\tcurrentStateRate = 0\r\n\tmostDangerousState = \"\"\r\n\r\n\tfor state in dataSource.stateDictionary:\r\n\t\tcurrentStateRate = getStateCrudeRate(dataSource.getStateQuery(startYear, endYear, state))\r\n\r\n\t\tif (currentStateRate > crudeRate):\r\n\t\t\tcrudeRate = currentStateRate\r\n\t\t\tmostDangerousState = state\r\n\r\n\treturn mostDangerousState, crudeRate\r\n\r\n\r\ndef getNationalSingleYearCrudeRates(startYear, endYear):\r\n\t'''\r\n\tGets the national rate of homicide over each year from startYear to endYear, places all of these\r\n\tcrude rates into a list of ints and returns this list\r\n\r\n PARAMETERS:\r\n startYear: the first year to find the homicide crude rate for\r\n endYear: the last year to find the homicide crude rate for\r\n\r\n RETURN:\r\n A list of ints each representing the national rate of homicide per 100,000 people in\r\n\teach year within the specified range\r\n\r\n Calls getNationalCrudeRate\r\n '''\r\n\tlist = []\r\n\trate = 0\r\n\tcrudeRates = []\r\n\r\n\tfor year in range (startYear, endYear + 1):\r\n\t\tlist = dataSource.getUSATotals(year, year)\r\n\t\trate = getNationalCrudeRate(list)\r\n\t\tcrudeRates.append(rate)\r\n\r\n\treturn crudeRates\r\n\r\n\r\n@app.route('/', methods = ['POST', 'GET'])\r\ndef getNationalQueryResults():\r\n\t'''\r\n\tLoads the homepage and returns a results page corresponding to the user's query. 
Directs\r\n\tuser to an error page if the query was not formatted properly\r\n\t'''\r\n\ttry:\r\n\t\tstart = request.args.get('startYear')\r\n\t\tend = request.args.get('endYear')\r\n\t\tstart, end = adjustYears(start, end)\r\n\t\tstart, end = setYearsToInts(start, end)\r\n\r\n\t\tdataTable = getNationalQueryData(start, end)\r\n\t\t\r\n\t\treturn render_template('HomePage2.html',\r\n\t\t\t\t\t\t\t\t\tinputdata = dataTable[\"singleYearCrudeRates\"],\r\n\t\t\t\t\t\t\t\t\tinputlabels = dataTable[\"yearRange\"],\r\n\t\t\t\t\t\t\t\t\tinputtitle = f\"National Homicide Rate from {start} to {end}\",\r\n\t\t\t\t\t\t\t\t\tnationalCrudeRate = dataTable[\"nationalCrudeRate\"],\r\n\t\t\t\t\t\t\t\t\tstartYear = start,\r\n\t\t\t\t\t\t\t\t\tendYear = end,\r\n\t\t\t\t\t\t\t\t\tmostDangerousState = dataTable[\"mostDangerousState\"],\r\n\t\t\t\t\t\t\t\t\tmostDangerousStateRate = dataTable[\"mostDangerousStateRate\"])\r\n\r\n\texcept Exception as e:\r\n\r\n\t\treturn render_template('Error.html', error = e)\r\n\r\n\r\n@app.route('/stateQuery/')\r\ndef getMapQueryResults():\r\n\t'''\r\n\tLoads a resulting state query page if the user clicks on one of the states in the\r\n\tinteractive map or otherwise queries for a state\r\n\t'''\r\n\tif(request.method == 'GET'):\r\n\r\n\t\ttry:\r\n\t\t\tstart = request.args.get('startYear')\r\n\t\t\tend = request.args.get('endYear')\r\n\t\t\tstart, end = adjustYears(start, end)\r\n\t\t\tstart, end = setYearsToInts(start, end)\r\n\t\t\tstate = request.args.get('state')\r\n\t\t\tstate = cleanStateInput(state)\r\n\t\t\t\r\n\t\t\tdataTable = getStateQueryData(start, end, state)\r\n\t\t\t\r\n\t\t\treturn render_template('Results.html', stateCrudeRate = dataTable[\"stateCrudeRate\"],\r\n\t\t\t\t\t\t\t\t\t\tnationalCrudeRate = dataTable[\"nationalCrudeRate\"],\r\n\t\t\t\t\t\t\t\t\t\tcausesAndPercentages = dataTable[\"causesAndPercentages\"],\r\n\t\t\t\t\t\t\t\t\t\tstate = state,\r\n\t\t\t\t\t\t\t\t\t\tstartYear = start,\r\n\t\t\t\t\t\t\t\t\t\tendYear = end,\r\n\t\t\t\t\t\t\t\t\t\tinputdata = dataTable[\"singleYearCrudeRates\"],\r\n\t\t\t\t\t\t\t\t\t\tinputlabels = dataTable[\"yearRange\"],\r\n\t\t\t\t\t\t\t\t\t\tinputtitle = f\"{state} Annual Crude Rates\",\r\n\t\t\t\t\t\t\t\t\t\tinputpiedata= list(dataTable[\"causesAndPercentages\"].values()),\r\n\t\t\t\t\t\t\t\t\t\tinputpielabels= list(dataTable[\"causesAndPercentages\"].keys()),\r\n\t\t\t\t\t\t\t\t\t\tinputpietitle=f\"{state} Homicide Data by Cause of Death\")\r\n\r\n\t\texcept Exception as e:\r\n\r\n\t\t\treturn render_template('Error.html', error = e)\r\n\r\n\telse:\r\n\r\n\t\tstate = \"Alabama\"\r\n\t\tstart = 1999\r\n\t\tend = 2017\r\n\t\tdataTable = getStateQueryData(start, end, state)\r\n\r\n\t\treturn render_template('Results.html', stateCrudeRate = dataTable[\"stateCrudeRate\"],\r\n\t\t\t\t\t\t\t\t\t\tnationalCrudeRate = dataTable[\"nationalCrudeRate\"],\r\n\t\t\t\t\t\t\t\t\t\tcausesAndPercentages = dataTable[\"causesAndPercentages\"],\r\n\t\t\t\t\t\t\t\t\t\tstate = state,\r\n\t\t\t\t\t\t\t\t\t\tstartYear = start,\r\n\t\t\t\t\t\t\t\t\t\tendYear = end)\r\n\r\n\r\nif __name__ == '__main__':\r\n\tif len(sys.argv) != 3:\r\n\t\tprint('Usage: {0} host port'.format(sys.argv[0]), file=sys.stderr)\r\n\t\texit()\r\n\r\n\thost = sys.argv[1]\r\n\tport = sys.argv[2]\r\n\tapp.run(host=host, port=port)\r\n", "sub_path": "Backend/webapp.py", "file_name": "webapp.py", "file_ext": "py", "file_size_in_byte": 19246, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": 
"flask.Flask", "line_number": 15, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 555, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 555, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 555, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 556, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 556, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 556, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 562, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 574, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 583, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 583, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 586, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 586, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 586, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 587, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 587, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 587, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 590, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 590, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 590, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 595, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 610, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 619, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 628, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 629, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 629, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 632, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 633, "usage_type": "attribute"}]} {"seq_id": "134158455", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='AreaInfo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('atitle', models.CharField(max_length=20)),\n ('aParent', models.ForeignKey(blank=True, to='usermode.AreaInfo', null=True)),\n ],\n options={\n 'db_table': 'AreaInfo',\n },\n ),\n migrations.CreateModel(\n name='CartInfo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('count', models.IntegerField()),\n ],\n options={\n 'db_table': 'CartInfo',\n },\n ),\n migrations.CreateModel(\n name='GoodsInfo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('gtitle', models.CharField(max_length=20)),\n ('gprice', models.DecimalField(max_digits=5, decimal_places=2)),\n ('gdesc', models.CharField(max_length=200)),\n ('gdetail', models.CharField(max_length=1000)),\n ('gpic', models.CharField(max_length=200)),\n ('gunit', models.CharField(max_length=8)),\n ('isDelete', 
models.BooleanField(default=False)),\n ],\n options={\n 'db_table': 'GoodsInfo',\n },\n ),\n migrations.CreateModel(\n name='OrderDetailInfo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('count', models.IntegerField()),\n ('price', models.DecimalField(max_digits=8, decimal_places=2)),\n ('goods', models.ForeignKey(to='usermode.GoodsInfo')),\n ],\n options={\n 'db_table': 'OrderDetailInfo',\n },\n ),\n migrations.CreateModel(\n name='OrderInfo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('otime', models.DateTimeField()),\n ('ototal', models.DecimalField(max_digits=8, decimal_places=2)),\n ('state', models.BooleanField(default=False)),\n ],\n options={\n 'db_table': 'OrderInfo',\n },\n ),\n migrations.CreateModel(\n name='TypeInfo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=20)),\n ('isDelete', models.BooleanField(default=False)),\n ],\n options={\n 'db_table': 'TypeInfo',\n },\n ),\n migrations.CreateModel(\n name='UserAddress',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('userName', models.CharField(max_length=20)),\n ('uaddress', models.CharField(max_length=100, null=True, blank=True)),\n ('uphone', models.CharField(max_length=11)),\n ('ucode', models.CharField(max_length=6)),\n ('ustaue', models.BooleanField(default=False)),\n ],\n options={\n 'db_table': 'UserAddress',\n },\n ),\n migrations.CreateModel(\n name='UserInfo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('uname', models.CharField(max_length=20)),\n ('upwd', models.CharField(max_length=50)),\n ('uemail', models.CharField(max_length=40)),\n ('isDelete', models.BooleanField(default=False)),\n ],\n options={\n 'db_table': 'UserInfo',\n },\n ),\n migrations.AddField(\n model_name='useraddress',\n name='user',\n field=models.ForeignKey(to='usermode.UserInfo'),\n ),\n migrations.AddField(\n model_name='orderinfo',\n name='user',\n field=models.ForeignKey(to='usermode.UserInfo'),\n ),\n migrations.AddField(\n model_name='orderdetailinfo',\n name='order',\n field=models.ForeignKey(to='usermode.OrderInfo'),\n ),\n migrations.AddField(\n model_name='goodsinfo',\n name='gtype',\n field=models.ForeignKey(to='usermode.TypeInfo'),\n ),\n migrations.AddField(\n model_name='cartinfo',\n name='goods',\n field=models.ForeignKey(to='usermode.GoodsInfo'),\n ),\n migrations.AddField(\n model_name='cartinfo',\n name='user',\n field=models.ForeignKey(to='usermode.UserInfo'),\n ),\n ]\n", "sub_path": "FreshEveryDay/usermode/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 5368, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": 
"django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 66, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 66, 
"usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 67, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 67, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 68, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 68, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 74, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 74, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 77, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 77, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 78, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 78, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 79, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 79, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 85, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 85, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 88, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 88, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 89, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 89, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 90, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 90, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 91, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 91, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 92, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 92, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 93, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 93, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 99, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 99, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 102, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 102, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 103, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 103, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 104, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 104, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 105, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 105, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 106, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 106, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 112, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 112, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 115, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 115, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 117, "usage_type": "call"}, {"api_name": 
"django.db.migrations", "line_number": 117, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 120, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 120, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 122, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 122, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 125, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 125, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 127, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 127, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 130, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 130, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 132, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 132, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 135, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 135, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 137, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 137, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 140, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 140, "usage_type": "name"}]} {"seq_id": "595266328", "text": "import sys\nfrom os.path import abspath, join\n\nimport numpy as np\nimport scipy.optimize as so\nimport scipy.spatial as sp\nimport trimesh\nfrom PySide2.QtWidgets import QApplication\nfrom PySide2.QtWidgets import QMainWindow\nfrom PySide2.QtWidgets import QMessageBox\n\nfrom loader.pose_loader import PoseLoader\nfrom loader.regressor_loader import RegressorLoader\nfrom loader.smpl_loader import SmplLoader\nfrom main_window import Ui_MainWindow\n\nNEUTRAL = 'n'\nFEMALE = 'f'\nMALE = 'm'\n\nmodels = {\n NEUTRAL: 'basic_model_neutral.pkl',\n # FEMALE: 'basic_model_female.pkl',\n # MALE: 'basic_model_male.pkl',\n}\n\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent=parent)\n self.setupUi(self)\n\n self.scene_chache = {}\n self.pose_loader = PoseLoader()\n self.smpl_loader = SmplLoader()\n self.regressor_loader = RegressorLoader()\n\n self.neutral_button.toggled.connect(lambda: self._init_widget())\n if FEMALE not in models:\n self.female_button.setVisible(False)\n self.female_button.toggled.connect(lambda: self._init_widget())\n if MALE not in models:\n self.male_button.setVisible(False)\n self.male_button.toggled.connect(lambda: self._init_widget())\n\n self.poses_box.currentIndexChanged.connect(lambda: self._init_widget())\n\n self.regressor_name.textChanged.connect(lambda: self.check_convert_button())\n self.convert_button.clicked.connect(lambda: self.convert_scenes_to_regressor())\n\n self.reset_button.clicked.connect(lambda: self.reset())\n\n self._init_poses()\n self._init_regressors()\n self._init_widget()\n\n def _init_poses(self):\n poses_path = abspath(join(__file__, '..', 'smpl', 'poses', 'cmu_smpl_01_01.pkl'))\n self.pose_loader.init_poses(poses_path)\n poses, shapes, transforms = self.pose_loader.sample_poses()\n\n for gender, file_name in models.items():\n smpl_model_path = abspath(join(__file__, '..', 'smpl', 'models', file_name))\n 
self.smpl_loader.init_model(smpl_model_path)\n\n            for i, (pose, shape, transform) in enumerate(zip(poses, shapes, transforms)):\n                verts = self.smpl_loader.load_vertices(pose, shape, transform)\n                faces = self.smpl_loader.faces\n\n                mesh = trimesh.Trimesh(vertices=verts,\n                                       faces=faces,\n                                       vertex_colors=[200, 200, 200, 255],\n                                       face_colors=[0, 0, 0, 0],\n                                       use_embree=False,\n                                       process=False)\n\n                transform = trimesh.transformations.rotation_matrix(np.deg2rad(-90), [1, 0, 0], mesh.centroid)\n                mesh.apply_transform(transform)\n\n                transform = trimesh.transformations.rotation_matrix(np.deg2rad(-90), [0, 1, 0], mesh.centroid)\n                mesh.apply_transform(transform)\n\n                key = gender + '_pose_' + str(i)\n                self.scene_chache[key] = mesh.scene()\n\n        for i in range(poses.shape[0]):\n            self.poses_box.addItem('Pose ' + str(i))\n\n    def _init_regressors(self):\n        regressors_path = abspath(join(__file__, '..', 'regressors', '*.npy'))\n        self.regressor_loader.init_regressors(regressors_path)\n\n    def _init_widget(self):\n        gender = self.get_checked_gender()\n        index = self.poses_box.currentIndex()\n        index = index if index >= 0 else 0\n        key = gender + '_pose_' + str(index)\n        scene = self.scene_chache[key]\n        self.openGLWidget.initialize_scene(scene, self.smpl_loader.j_regressor, self.regressor_loader.joint_regressor)\n        self.openGLWidget.updateGL()\n\n    def get_checked_gender(self):\n        if self.neutral_button.isChecked():\n            gender = NEUTRAL\n        elif self.female_button.isChecked():\n            gender = FEMALE\n        elif self.male_button.isChecked():\n            gender = MALE\n        else:\n            raise Exception('no button checked')\n\n        return gender\n\n    def check_convert_button(self):\n        enable_button = self.regressor_name.text() != ''\n        self.convert_button.setEnabled(enable_button)\n\n    def convert_scenes_to_regressor(self):\n        self.brute_force_closest_vertex_to_joints()\n\n    def brute_force_closest_vertex_to_joints(self):\n        scenes = self.get_valid_scenes()\n\n        # calculate 50 nearest vertex ids for all meshes and joints\n        closest_k = 50\n        candidates = {}\n        keep_searching = True\n        while keep_searching:\n            for key, scene in scenes.items():\n                joint = np.squeeze(scene['joint'].vertices)\n                distances, vertex_ids = scene['mesh'].kdtree.query(joint, closest_k)\n                candidates[key] = {vertex_id: dist for vertex_id, dist in zip(vertex_ids, distances)}\n\n            # only keep common ids\n            from functools import reduce\n            common_ids = reduce(np.intersect1d, [list(c.keys()) for c in candidates.values()])\n\n            # calculate average distance per mesh/joint for valid ids\n            mean_dist = [np.mean([c[common_id] for c in candidates.values()]) for common_id in common_ids]\n            mean_dist = {common_id: dist for common_id, dist in zip(common_ids, mean_dist)}\n            mean_dist = {k: v for k, v in sorted(mean_dist.items(), key=lambda item: item[1])}\n\n            if len(list(mean_dist)) == 0:\n                closest_k += 10\n            else:\n                keep_searching = False\n\n        # pick closest vertex with min average distance to all joints per mesh\n        closest_id = list(mean_dist)[0]\n        final_vertices = [closest_id]\n        mean_dist.pop(closest_id)\n\n        while len(final_vertices) < 10:\n            # calculate all distance combinations between valid vertices\n            vertex_ids = list(mean_dist)\n            id_dist = [sp.distance.cdist(s['mesh'].vertices[final_vertices], s['mesh'].vertices[vertex_ids]) for s in\n                       scenes.values()]\n            id_dist = np.mean(id_dist, axis=0)\n\n            
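# greedy choice: mean_dist holds each candidate's distance to the joint, while\n            # id_dist holds its spread from the vertices already picked, so the pick below\n            # favours candidates that are far from the picked set relative to the joint\n            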
# max the difference between distance to all other vertices and distances to the joint\n            best_dist = id_dist - list(mean_dist.values())\n            best_id = np.argmax(best_dist)\n\n            n, m = np.unravel_index(best_id, best_dist.shape)\n            best_id = vertex_ids[m]\n\n            final_vertices.append(best_id)\n            mean_dist.pop(best_id)\n\n        vertices, joints = [], []\n        for scene in scenes.values():\n            verts = np.asarray(scene['mesh'].vertices).reshape([-1, 3])\n            verts = verts[final_vertices]\n            vertices.append(verts)\n            joint = np.asarray(scene['joint'].vertices).reshape([-1, 3])\n            joints.append(joint)\n\n        vertices = np.stack(vertices).transpose([0, 2, 1]).reshape([-1, len(final_vertices)])\n        joints = np.stack(joints).transpose([0, 2, 1]).reshape([-1])\n\n        vertex_weight = np.zeros([6890, ])\n        weights = so.nnls(vertices, joints)[0]\n        vertex_weight[final_vertices] = weights\n\n        file = join('regressors', 'regressor_{}.npy'.format(self.regressor_name.text()))\n        with open(file, 'wb') as f:\n            vertex_weight = vertex_weight.astype(np.float32)\n            vertex_weight = np.expand_dims(vertex_weight, -1)\n            np.save(f, vertex_weight)\n\n        # QMessageBox(icon, title, text, standard buttons)\n        widget = QMessageBox(QMessageBox.Information, 'Regressor saved',\n                             'Regressor file successfully saved to: {}\\n\\nClick Reset to start again'.format(file),\n                             QMessageBox.Ok)\n        widget.exec_()\n\n        vertex_weight = np.squeeze(vertex_weight)\n        self.convert_button.setEnabled(False)\n        self.regressor_name.setEnabled(False)\n\n        for scene in self.scene_chache.values():\n            mesh = scene.geometry['geometry_0']\n            mesh.visual.vertex_colors = [200, 200, 200, 255]\n            mesh.visual.vertex_colors[final_vertices] = [0, 255, 0, 255]\n\n            x = np.matmul(vertex_weight, mesh.vertices[:, 0])\n            y = np.matmul(vertex_weight, mesh.vertices[:, 1])\n            z = np.matmul(vertex_weight, mesh.vertices[:, 2])\n            joints = np.vstack((x, y, z)).T\n            joints = trimesh.PointCloud(joints, colors=[0, 255, 0, 255])\n            scene.add_geometry(joints, geom_name='new_joints')\n\n    def get_valid_scenes(self):\n        valid_scenes = {}\n\n        scenes, scales, centroids = [], [], []\n        for scene in self.scene_chache.values():\n            if 'joint_0' not in scene.geometry:\n                continue\n            scenes.append(scene)\n            scales.append(scene.scale)\n            centroids.append(scene.centroid)\n\n        for i, scene in enumerate(scenes):\n            valid_scenes[i] = {'mesh': scene.geometry['geometry_0'], 'joint': scene.geometry['joint_0']}\n\n        return valid_scenes\n\n    def reset(self):\n        self.regressor_name.setText('')\n        self.regressor_name.setEnabled(True)\n        self.poses_box.clear()\n\n        self._init_poses()\n        self._init_regressors()\n        self._init_widget()\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    window = MainWindow()\n    window.show()\n\n    try:  # main loop\n        sys.exit(app.exec_())\n    except SystemExit:\n        pass\n", "sub_path": "keypoint_marker/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 9638, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "PySide2.QtWidgets.QMainWindow", "line_number": 28, "usage_type": "name"}, {"api_name": "main_window.Ui_MainWindow", "line_number": 28, "usage_type": "name"}, {"api_name": "loader.pose_loader.PoseLoader", "line_number": 35, "usage_type": "call"}, {"api_name": "loader.smpl_loader.SmplLoader", "line_number": 36, "usage_type": "call"}, {"api_name": "loader.regressor_loader.RegressorLoader", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 64, "usage_type": "call"}, {"api_name": 
"os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "trimesh.Trimesh", "line_number": 71, "usage_type": "call"}, {"api_name": "trimesh.transformations.rotation_matrix", "line_number": 78, "usage_type": "call"}, {"api_name": "trimesh.transformations", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.deg2rad", "line_number": 78, "usage_type": "call"}, {"api_name": "trimesh.transformations.rotation_matrix", "line_number": 81, "usage_type": "call"}, {"api_name": "trimesh.transformations", "line_number": 81, "usage_type": "attribute"}, {"api_name": "numpy.deg2rad", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 131, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 137, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 140, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 157, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 157, "usage_type": "attribute"}, {"api_name": "scipy.spatial", "line_number": 157, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 186, "usage_type": "call"}, {"api_name": "scipy.optimize.nnls", "line_number": 187, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 187, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 192, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 194, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 196, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMessageBox.Information", "line_number": 197, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 197, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QMessageBox.Ok", "line_number": 199, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 199, "usage_type": "name"}, {"api_name": "numpy.squeeze", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 215, "usage_type": "call"}, {"api_name": "trimesh.PointCloud", "line_number": 216, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QApplication", "line_number": 246, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 246, "usage_type": "attribute"}, {"api_name": "{'reduce': 
'functools.reduce'}", "line_number": 247, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 251, "usage_type": "call"}]} {"seq_id": "277453529", "text": "from app import app, db\nfrom bson import ObjectId\n\nfrom flask import (request, abort)\nimport json\n\n\n@app.route('/users/authenticate', methods=['POST'])\ndef login():\n user = {\n \"login\": request.json['login'],\n \"password\": request.json['password']\n }\n has_user = db['users'].find_one({\"login\": user[\"login\"]})\n if not has_user:\n abort(404)\n elif user['password'] != has_user['password']:\n abort(400)\n else:\n has_user['_id'] = str(has_user['_id'])\n del has_user['password']\n return encode_json(has_user)\n\n\n@app.route('/users', methods=['POST'])\ndef create_user():\n user = {\n \"login\": request.json['login'],\n \"name\": request.json['name'],\n \"password\": request.json['password']\n }\n has_user = db['users'].find_one({\"login\": user[\"login\"]})\n if not has_user:\n db['users'].insert(user)\n user['_id'] = str(user['_id'])\n return encode_json(user)\n else:\n abort(400)\n\n\n@app.route('/products', methods=['POST'])\ndef create_product():\n product = {\n \"code\": request.json['code'],\n \"name\": request.json['name'],\n \"description\": request.json['description'],\n \"price\": request.json['price'],\n \"grid\": []\n }\n grid = request.json['grid']\n for item in grid:\n product['grid'].append({\n \"color\": item['color'],\n \"size\": item['size']\n })\n db['products'].insert(product)\n product['_id'] = str(product['_id'])\n return encode_json(product)\n\n\n@app.route('/products', methods=['GET'])\ndef find_products():\n products = db['products'].find()\n products_find = []\n for item in products:\n product = {\n \"code\": item['code'],\n \"name\": item['name'],\n \"description\": item['description'],\n \"price\": item['price'],\n \"grid\": []\n }\n grids = item['grid']\n for grid in grids:\n product['grid'].append({\n \"color\": grid['color'],\n \"size\": grid['size']\n })\n product['_id'] = str(item['_id'])\n products_find.append(product)\n\n return encode_json(products_find)\n\n\n@app.route('/products/', methods=['GET'])\ndef find_product_by_id(product_id):\n item = db['products'].find_one({\"_id\": ObjectId(product_id)})\n if item:\n product = {\n \"code\": item['code'],\n \"name\": item['name'],\n \"description\": item['description'],\n \"price\": item['price'],\n \"grid\": []\n }\n grids = item['grid']\n for grid in grids:\n product['grid'].append({\n \"color\": grid['color'],\n \"size\": grid['size']\n })\n product['_id'] = str(item['_id'])\n\n return encode_json(product)\n else:\n abort(404)\n\n\ndef encode_json(value):\n return json.dumps(value, ensure_ascii=False).encode('utf8')\n", "sub_path": "api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 2955, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "flask.request.json", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 11, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 12, "usage_type": "name"}, {"api_name": "app.db", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 18, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 8, "usage_type": "call"}, {"api_name": "app.app", 
"line_number": 8, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "app.db", "line_number": 32, "usage_type": "name"}, {"api_name": "app.db", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 38, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 25, "usage_type": "call"}, {"api_name": "app.app", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "app.db", "line_number": 56, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 41, "usage_type": "call"}, {"api_name": "app.app", "line_number": 41, "usage_type": "name"}, {"api_name": "app.db", "line_number": 63, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 61, "usage_type": "call"}, {"api_name": "app.app", "line_number": 61, "usage_type": "name"}, {"api_name": "app.db", "line_number": 87, "usage_type": "name"}, {"api_name": "bson.ObjectId", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 106, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 85, "usage_type": "call"}, {"api_name": "app.app", "line_number": 85, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 110, "usage_type": "call"}]} {"seq_id": "3541722", "text": "# -*- coding: utf-8 -*-\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom docx import Document\nfrom docx.shared import Cm\n\n\nclass Exceptions:\n\n\tdef signal_exception(self, reason):\n\t\tprint(reason)\n\nclass Philpapers:\n\n\tdef check_urls(self, raw_urls):\n\t\n\t\turls_count = raw_urls.count('https://philpapers.org/rec/')\n\t\turls_processing = raw_urls\n\t\turls_list = []\n\t\t\n\t\tif urls_count == 0:\n\t\t\texception_reason = \"Brak poprawnych adresรณw URL odnoszฤ…cych do Philpapers\"\n\t\t\tExceptions.signal_exception(exception_reason)\n\t\t\n\t\tfor i in range(urls_count):\n\t\t\tnth_url = urls_processing[urls_processing.rfind('https://philpapers.org/rec/'):]\n\t\t\turls_processing = urls_processing[0:len(urls_processing)-len(nth_url)]\n\t\t\tnth_url = nth_url.strip(', ')\n\t\t\turls_list.append(nth_url)\n\t\t\t\n\t\turls_list[:] = list(set(urls_list))\n\n\t\treturn urls_list\n\t\n\tdef get_data(self, philpapers_url):\n\t\n\t\tr_lines_dict = []\n\t\t\n\t\tprint(\"ppapers open\")\n\t\tr = requests.get(philpapers_url, stream=True)\n\n\t\t_delimiter = 
_delimiter = 'Options'\n\n\t\tfor line in r.iter_lines():\n\t\t\t_line = line.decode('utf-8','ignore')\n\t\t\tif _line != _delimiter:\n\t\t\t\tr_lines_dict.append(_line)\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\t\t\n\t\tr.close()\n\t\tprint(\"closed\")\n\t\t\n\t\tr_lines_str = ''.join(r_lines_dict)\n\t\t\n\t\treturn r_lines_str\n\t\n\tdef check_integrity(self, raw_data, raw_data_url):\n\t\n\t\t# if raw_data.find('schema.org/Article') > 0:\n\t\t\t# Philpapers.paper_is_article = True\n\t\t# elif raw_data.find('schema.org/Book') > 0:\n\t\t\t# Philpapers.paper_is_book = True\n\t\tif raw_data.count('schema.org/Book') == 0 and raw_data.count('schema.org/Article') == 0:\n\t\t\tintegrity = False\n\t\telse:\n\t\t\tintegrity = True\n\t\t\n\t\treturn integrity\n\t\n\tdef parse_data(self, raw_data):\n\t\t\n\t\tpaper_is_book = False\n\t\tpaper_is_article = False\n\t\tarticle_in_book = False\n\t\tarticle_in_journal = False\n\t\tpaper_type = None\n\t\tpaper_data = None\n\t\t\n\t\t# this check is easier to do while everything is still a plain string\n\t\t\n\t\tif raw_data.find('schema.org/Article') > 0:\n\t\t\tpaper_is_article = True\n\t\telif raw_data.find('schema.org/Book') > 0:\n\t\t\tpaper_is_book = True\n\t\t\n\t\tr_bs = BeautifulSoup(raw_data, 'html.parser')\n\t\t\n\t\t# this part reads the citation meta tags:\n\t\t\n\t\tbs_authors_list = r_bs.find_all(attrs={'name':'citation_author'})\n\t\tauthors_number = len(bs_authors_list)\n\t\t\n\t\tbs_title = str(r_bs.find_all(attrs={'name':'citation_title'}))\n\t\tbs_date = str(r_bs.find_all(attrs={'name':'citation_publication_date'}))\n\t\t\n\t\tpaper_title = bs_title[bs_title.find('content=')+9:bs_title.rfind('name=')-2]\n\t\tpaper_date = bs_date[bs_date.find('content=')+9:bs_date.rfind('name=')-2]\n\t\t\n\t\tif authors_number == 1:\n\t\t\ttemp_author = str(bs_authors_list[0])\n\t\t\ttemp_author = temp_author[temp_author.find('content=')+9:temp_author.rfind('name=')-2]\n\t\t\ttemp_author_lastname = temp_author[temp_author.rfind(' '):].strip()\n\t\t\ttemp_author_firstname = temp_author[:temp_author.rfind(' ')].strip()\n\t\t\tpaper_authors = temp_author_lastname + ' ' + temp_author_firstname\n\t\telse:\n\t\t\ttemp_authors_list = []\n\t\t\tfor item in bs_authors_list:\n\t\t\t\ttemp_author = str(item)\n\t\t\t\ttemp_author = temp_author[temp_author.find('content=')+9:temp_author.rfind('name=')-2]\n\t\t\t\ttemp_author_lastname = temp_author[temp_author.rfind(' '):].strip()\n\t\t\t\ttemp_author_firstname = temp_author[:temp_author.rfind(' ')].strip()\n\t\t\t\ttemp_author = temp_author_lastname + ' ' + temp_author_firstname\n\t\t\t\ttemp_authors_list.append(temp_author)\n\t\t\tpaper_authors = ', '.join(temp_authors_list)\n\t\t\n\t\t# this part reads the recAuthors block:\n\t\t\n\t\tbs_recauthors = str(r_bs.find_all(class_='recAuthors'))\n\t\t\n\t\tif bs_recauthors.count('(ed.)') == 1:\n\t\t\tpaper_authors = paper_authors + ' (ed.)'\n\t\telif bs_recauthors.count('(eds.)') == 1:\n\t\t\tpaper_authors = paper_authors + ' (eds.)'\n\t\t\t\n\t\t# this part reads the pubinfo block:\n\t\t\t\t\n\t\tbs_pubinfo = str(r_bs.find_all(class_='recPubInfo'))\n\t\t\n\t\tif bs_pubinfo.find('asearch.pl') > 0:\n\t\t\tarticle_in_journal = True\n\t\telse:\n\t\t\tarticle_in_book = True\n\t\t\t\n\t\t
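# three result shapes follow: 'book', 'article' (journal article) and 'chapter' (article in an edited book)\n\t\t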
if paper_is_book:\n\t\t\tpaper_publisher = bs_pubinfo[bs_pubinfo.find('recPubInfo')+12:bs_pubinfo.find('copyrightYear')-17].strip()\n\t\t\tpaper_type = \"book\"\n\t\t\tpaper_data = {'authors':paper_authors, 'title':paper_title, 'publisher':paper_publisher, 'year':paper_date}\n\t\telif paper_is_article and article_in_journal:\n\t\t\ttemp_journal_data = bs_pubinfo[bs_pubinfo.find('asearch.pl'):bs_pubinfo.find('copyrightYear')-17]\n\t\t\tpaper_journal_name = temp_journal_data[temp_journal_data.find('>')+1:temp_journal_data.rfind('a>')-2].strip()\n\t\t\tpaper_journal_issue = temp_journal_data[temp_journal_data.find('em>')+3:temp_journal_data.rfind(':')].strip()\n\t\t\tpaper_journal_pages = temp_journal_data[temp_journal_data.rfind(':')+1:].strip()\n\t\t\tpaper_journal_pages = paper_journal_pages.replace(' ','')\n\t\t\tpaper_journal_pages = paper_journal_pages.replace('--','-')\n\t\t\tpaper_type = \"article\"\n\t\t\tpaper_data = {'authors':paper_authors, 'title':paper_title, 'journal':paper_journal_name, 'issue':paper_journal_issue, 'pages':paper_journal_pages, 'year':paper_date}\n\t\telif paper_is_article and article_in_book:\n\t\t\ttemp_book_data = bs_pubinfo[bs_pubinfo.find('recPubInfo')+12:bs_pubinfo.find('copyrightYear')-17]\n\t\t\tbook_authors = temp_book_data[temp_book_data.find('In')+2:temp_book_data.find('em>')-1].strip()  # the '<em' marker appears to have been lost from this slice; stopping before 'em>' is a best guess\n\t\t\tbook_title = temp_book_data[temp_book_data.find('em>')+3:temp_book_data.rfind('em>')-2].strip()  # best-guess recovery of the <em>-wrapped book title used below; the exact offsets are an assumption\n\t\t\ttemp_book_publisher = temp_book_data[temp_book_data.rfind('em>')+4:temp_book_data.rfind('copyrightYear')-17]\n\t\t\t\n\t\t\tif temp_book_data.count(' pp. ') > 0:\n\t\t\t\tbook_publisher = temp_book_data[temp_book_data.rfind('em>')+4:temp_book_data.find(' pp. ')].strip(' .')\n\t\t\t\tbook_pages = temp_book_data[temp_book_data.find(' pp. ')+5:].strip(' .')\n\t\t\t\tbook_pages = book_pages.replace(' ','')\n\t\t\t\tbook_pages = book_pages.replace('--','-')\n\t\t\t\tpaper_type = \"chapter\"\n\t\t\t\tpaper_data = {'authors':paper_authors, 'title':paper_title, 'editors':book_authors, 'booktitle':book_title, 'publisher':book_publisher, 'pages':book_pages, 'year':paper_date}\n\t\t\telse:\n\t\t\t\tbook_publisher = temp_book_data[temp_book_data.rfind('em>')+4:].strip(' .')\n\t\t\t\tbook_pages = '(no page range given)'\n\t\t\t\tpaper_type = \"chapter\"\n\t\t\t\tpaper_data = {'authors':paper_authors, 'title':paper_title, 'editors':book_authors, 'booktitle':book_title, 'publisher':book_publisher, 'pages':book_pages, 'year':paper_date}\n\t\t\n\t\treturn paper_type, paper_data\n\n\nclass MakeDocx:\n\t\t\n\tdef save_document(self, doc_to_save, docx_hashed_list_name):\n\t\n\t\tdoc_to_save.save(docx_hashed_list_name + '.docx')\n\t\tprint('docx saved')\n\t\t\n\tdef format_document(self, doc_to_format):\n\t\t\n\t\t# standard A4 page dimensions\n\t\tstandard_height = Cm(29.7)\n\t\tstandard_width = Cm(21)\n\t\tstandard_margin = Cm(2.5)\n\t\t\n\t\tsection = doc_to_format.sections[0]\n\t\t\n\t\tsection.page_height = standard_height\n\t\tsection.page_width = standard_width\n\t\tsection.bottom_margin = standard_margin\n\t\tsection.top_margin = standard_margin\n\t\tsection.left_margin = standard_margin\n\t\tsection.right_margin = standard_margin\n\t\n\tdef iter_paragraphs(self, entry_list, hashed_list_name):\n\t\t\n\t\tdocument_to_iter = Document()\n\t\t\n\t\tfor entry in entry_list:\n\t\t\tprint('iter_paragraphs entry:')\n\t\t\tprint(entry)\n\t\t\tdocument_to_iter = MakeDocx.add_paragraph_apa(self, entry[0], entry[1], document_to_iter)\n\t\t\tprint('entry processed')\n\t\t\n\t\tMakeDocx.format_document(self, document_to_iter)\n\t\tMakeDocx.save_document(self, document_to_iter, hashed_list_name)\n\t\tprint('conversion done')\n\t\t\n\tdef add_paragraph_apa(self, entry_type, entry_data, doc):\n\t\t\n\t\timport html\n\t\t\n\t\tif entry_type == 'article':\n\t\t\t\n\t\t\tentry_par = doc.add_paragraph()\n\t\t\t\n\t\t\tentry_authors = entry_par.add_run(entry_data['authors']+' ')\n\t\t\tentry_year = entry_par.add_run('('+entry_data['year']+'). 
')\n\t\t\tentry_title = entry_par.add_run(entry_data['title']+'. ')\n\t\t\tentry_journal = entry_par.add_run(entry_data['journal']).italic = True\n\t\t\tentry_issue = entry_par.add_run(', '+entry_data['issue']+', ')\n\t\t\tentry_pages = entry_par.add_run(entry_data['pages']+'.')\n\t\t\t\n\t\t\t# Surname, X., Surname2, X. Y., Surname3, Z. (year). Article title. Journal Title, volume(issue), first page-last page.\n\t\t\n\t\telif entry_type == 'book':\n\t\t\t\n\t\t\tentry_par = doc.add_paragraph()\n\t\t\t\n\t\t\tentry_authors = entry_par.add_run(entry_data['authors']+' ')\n\t\t\tentry_year = entry_par.add_run('('+entry_data['year']+'). ')\n\t\t\tentry_title = entry_par.add_run(entry_data['title']+'. ').italic = True\n\t\t\tentry_publisher = entry_par.add_run(entry_data['publisher']+'.')\n\t\t\t\n\t\t\t# Surname, X., Surname, X. Y. (year). Book title. Place of publication: Publisher.\n\t\t\t\n\t\telif entry_type == 'chapter':\n\t\t\n\t\t\tentry_par = doc.add_paragraph()\n\t\n\t\t\tentry_authors = entry_par.add_run(entry_data['authors']+' ')\n\t\t\tentry_year = entry_par.add_run('('+entry_data['year']+'). ')\n\t\t\tentry_title = entry_par.add_run(entry_data['title']+'. In: ')\n\t\t\tentry_editors = entry_par.add_run(entry_data['editors']+', ')\n\t\t\tentry_booktitle = entry_par.add_run(entry_data['booktitle']).italic = True\n\t\t\tentry_pages = entry_par.add_run(' (pp. '+entry_data['pages']+'). ')\n\t\t\tentry_publisher = entry_par.add_run(entry_data['publisher']+'.')\n\n\t\t\t# Surname, X. (year). Chapter title. In: Y. Surname, B. Surname (eds.), Book title (pp. first page-last page). Place of publication: Publisher.\n\n\t\treturn doc\n\n\ndef mainloop(input_urls):\n\tppapers_load = Philpapers()\n\tmakedocx_load = MakeDocx()\n\texceptions_load = Exceptions()\n\n\turls_checked = Philpapers.check_urls(ppapers_load, input_urls)\n\tif len(urls_checked) > 0:\n\n\t\tphilpapers_entries_list = []\n\t\tfor item in urls_checked:\n\t\t\tentry_data = Philpapers.get_data(ppapers_load, item)\n\t\t\tintegrity_check = Philpapers.check_integrity(ppapers_load, entry_data, item)\n\t\t\tprint(integrity_check)\n\t\t\tif integrity_check:\n\t\t\t\tphilpapers_entries_list.append(Philpapers.parse_data(ppapers_load, entry_data))\n\t\t\telse:\n\t\t\t\texception_reason = 'Cannot fetch data from %s: the link does not match the expected Philpapers page schema (missing document type). Fix the URL list and try again' % (item)\n\t\t\t\texceptions_load.signal_exception(exception_reason)\n\n\t\t# MakeDocx.iter_paragraphs(philpapers_entries_list)\n\treturn philpapers_entries_list\n\n
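# each parsed entry is a (type, data) tuple, e.g.\n# ('article', {'authors': ..., 'title': ..., 'journal': ..., 'issue': ..., 'pages': ..., 'year': ...});\n# the sample test_list under __main__ below shows all three supported shapes\n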
def testloop(input_list, hashname):\n\tppapers_load = Philpapers()\n\tmakedocx_load = MakeDocx()\n\texceptions_load = Exceptions()\n\n\tif len(input_list) > 0:\n\t\tphilpapers_entries_list = input_list\n\t\t'''\n\t\tfor item in urls_checked:\n\t\t\tentry_data = Philpapers.get_data(ppapers_load, item)\n\t\t\tintegrity_check = Philpapers.check_integrity(ppapers_load, entry_data, item)\n\t\t\tprint(integrity_check)\n\t\t\tif integrity_check:\n\t\t\t\tphilpapers_entries_list.append(Philpapers.parse_data(ppapers_load, entry_data))\n\t\t\telse:\n\t\t\t\texception_reason = 'Cannot fetch data from %s: the link does not match the expected Philpapers page schema (missing document type). Fix the URL list and try again' % (item)\n\t\t\t\tExceptions.signal_exception(exceptions_load, exception_reason)\n\t\t'''\n\t\tMakeDocx.iter_paragraphs(makedocx_load, philpapers_entries_list, hashname)\n\treturn philpapers_entries_list\n\nif __name__ == \"__main__\":\n\ttest_urls = 'https://philpapers.org/rec/BAUPAT-3 https://philpapers.org/rec/SURMCA https://philpapers.org/rec/RORTGO https://philpapers.org/rec/OPPACT https://philpapers.org/rec/SMATTT https://philpapers.org/rec/RORTGO'\n\ttest_list = [('book', {'year': '1994', 'publisher': 'Cornell University Press', 'authors': 'Lloyd Genevieve', 'title': \"Part of Nature: Self-Knowledge in Spinoza's Ethics\"}), ('article', {'pages': '342-359', 'year': '2013', 'journal': 'Deleuze Studies', 'title': 'Nomadic Ethics', 'authors': 'Braidotti Rosi', 'issue': '7 (3)'}), ('chapter', {'authors': 'Smart J. J. C.', 'booktitle': 'Contemporary Debates in Metaphysics', 'publisher': 'Blackwell', 'title': 'The tenseless theory of time', 'year': '2008', 'pages': '226-38', 'editors': 'Theodore Sider, John Hawthorne & Dean W. Zimmerman (eds.)'})]\n\ttestloop(test_list, 'test')  # 'test' is a placeholder output name; the docx is saved as 'test.docx'\n", "sub_path": "modules/Bibliografer.py", "file_name": "Bibliografer.py", "file_ext": "py", "file_size_in_byte": 11853, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 88, "usage_type": "call"}, {"api_name": "docx.shared.Cm", "line_number": 181, "usage_type": "call"}, {"api_name": "docx.shared.Cm", "line_number": 182, "usage_type": "call"}, {"api_name": "docx.shared.Cm", "line_number": 183, "usage_type": "call"}, {"api_name": "docx.Document", "line_number": 196, "usage_type": "call"}, {"api_name": "{'html': 'html'}.iter_paragraphs", "line_number": 293, "usage_type": "call"}]} {"seq_id": "541799953", "text": "# Copyright 2015 Cisco Systems, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\n\n\"\"\"This module provides APIs for communicating with DCNM.\"\"\"\n\n\nimport re\nimport requests\nimport sys\n\nfrom oslo_serialization import jsonutils\n\nfrom networking_cisco._i18n import _LE, _LI\n\nfrom networking_cisco.apps.saf.common import dfa_exceptions as dexc\nfrom networking_cisco.apps.saf.common import dfa_logger as logging\n\n\nLOG = logging.getLogger(__name__)\nUNKNOWN_SRVN_NODE_IP = '0.0.0.0'\nUNKNOWN_DCI_ID = -1\n\n\nclass DFARESTClient(object):\n\n    \"\"\"DFA client class that provides APIs to interact with DCNM.\"\"\"\n\n    def __init__(self, cfg):\n        self._base_ver = '7.1(0)'\n        self._is_iplus = False\n        self._ip = cfg.dcnm.dcnm_ip\n        self._user = cfg.dcnm.dcnm_user\n        self._pwd = cfg.dcnm.dcnm_password\n        self._part_name = cfg.dcnm.default_partition_name\n        if (not self._ip) or (not self._user) or (not self._pwd):\n            msg = (\"[DFARESTClient] Input DCNM IP, user name or password \"\n                   \"parameter is not specified\")\n            raise ValueError(msg)\n\n        self._req_headers = {'Accept': 'application/json',\n                             'Content-Type': 'application/json; charset=UTF-8'}\n\n        self.default_cfg_profile = cfg.dcnm.default_cfg_profile\n        self.default_vrf_profile = cfg.dcnm.default_vrf_profile\n        # url timeout: 10 seconds\n        self.timeout_resp = (10 if not cfg.dcnm.timeout_resp else\n                             cfg.dcnm.timeout_resp)\n        self._exp_time = 100000\n        self._resp_ok = (requests.codes.ok, requests.codes.created,\n                         requests.codes.accepted)\n\n        self.dcnm_protocol = self.get_dcnm_protocol()\n        # Fill the urls for DCNM Rest API's.\n        self.fill_urls()\n\n        self._cur_ver = self.get_version()\n        self._detect_iplus()\n\n        # Update the default network profile based on version of DCNM.\n        self._set_default_cfg_profile()\n        self._default_md = None\n\n    def _detect_iplus(self):\n        \"\"\"Check the DCNM version and determine if it's for iplus.\"\"\"\n\n        ver_expr = r\"([0-9]+)\\.([0-9]+)\\((.*)\\)\"\n        v1 = re.match(ver_expr, self._cur_ver)\n        v2 = re.match(ver_expr, self._base_ver)\n\n        if int(v1.group(1)) > int(v2.group(1)):\n            self._is_iplus = True\n        elif int(v1.group(1)) == int(v2.group(1)):\n            if int(v1.group(2)) > int(v2.group(2)):\n                self._is_iplus = True\n            elif int(v1.group(2)) == int(v2.group(2)):\n                self._is_iplus = v1.group(3) >= v2.group(3)\n\n        LOG.info(_LI(\"DCNM version: %(cur_ver)s, iplus: %(is_iplus)s\"),\n                 {'cur_ver': self._cur_ver, 'is_iplus': self._is_iplus})\n\n    def _failure_msg(self, response):\n        return \"[%s] %s\" % (response.status_code, response.text)\n\n    def get_segmentid_range(self, orchestrator_id):\n        \"\"\"Get segment id range from DCNM.\"\"\"\n\n        url = \"%s/%s\" % (self._segmentid_ranges_url, orchestrator_id)\n\n        res = self._send_request('GET', url, None, 'segment-id range')\n        if res and res.status_code in self._resp_ok:\n            return res.json()\n\n    
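# DCNM tracks one segment-id range per orchestrator; the two setters below\n    # encode the range as a \"min-max\" string keyed by the orchestrator id\n    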
def set_segmentid_range(self, orchestrator_id, segid_min, segid_max):\n        \"\"\"Set segment id range in DCNM.\"\"\"\n\n        url = self._segmentid_ranges_url\n\n        payload = {'orchestratorId': orchestrator_id,\n                   'segmentIdRanges': \"%s-%s\" % (segid_min, segid_max)}\n\n        res = self._send_request('POST', url, payload, 'segment-id range')\n        if not (res and res.status_code in self._resp_ok):\n            LOG.error(_LE(\"Failed to set segment id range for orchestrator \"\n                          \"%(orch)s on DCNM: %(text)s\"),\n                      {'orch': orchestrator_id, 'text': res.text})\n            raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))\n\n    def update_segmentid_range(self, orchestrator_id, segid_min, segid_max):\n        \"\"\"Update segment id range in DCNM.\"\"\"\n        url = \"%s/%s\" % (self._segmentid_ranges_url, orchestrator_id)\n\n        payload = {'orchestratorId': orchestrator_id,\n                   'segmentIdRanges': \"%s-%s\" % (segid_min, segid_max)}\n\n        res = self._send_request('PUT', url, payload, 'segment-id range')\n        if not (res and res.status_code in self._resp_ok):\n            LOG.error(_LE(\"Failed to update segment id range for orchestrator \"\n                          \"%(orch)s on DCNM: %(text)s\"),\n                      {'orch': orchestrator_id, 'text': res.text})\n            raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))\n\n    def _set_default_cfg_profile(self):\n        \"\"\"Set default network config profile.\n\n        Check whether the default_cfg_profile value exists in the current\n        version of DCNM. If not, set it to the new default value supported\n        by the latest version.\n        \"\"\"\n        try:\n            cfgplist = self.config_profile_list()\n            if self.default_cfg_profile not in cfgplist:\n                self.default_cfg_profile = ('defaultNetworkUniversalEfProfile'\n                                            if self._is_iplus else\n                                            'defaultNetworkIpv4EfProfile')\n        except dexc.DfaClientRequestFailed:\n            LOG.error(_LE(\"Failed to send request to DCNM.\"))\n            self.default_cfg_profile = 'defaultNetworkIpv4EfProfile'\n\n    def _create_network(self, network_info):\n        \"\"\"Send create network request to DCNM.\n\n        :param network_info: network parameters to be created on DCNM\n        \"\"\"\n        url = self._create_network_url % (network_info['organizationName'],\n                                          network_info['partitionName'])\n        payload = network_info\n\n        LOG.info(_LI('url %(url)s payload %(payload)s'),\n                 {'url': url, 'payload': payload})\n        return self._send_request('POST', url, payload, 'network')\n\n    def _config_profile_get(self, thisprofile):\n        \"\"\"Get information of a config profile from DCNM.\n\n        :param thisprofile: network config profile in request\n        \"\"\"\n        url = self._cfg_profile_get_url % (thisprofile)\n        payload = {}\n\n        res = self._send_request('GET', url, payload, 'config-profile')\n        if res and res.status_code in self._resp_ok:\n            return res.json()\n\n    def _config_profile_list(self):\n        \"\"\"Get list of supported config profile from DCNM.\"\"\"\n        url = self._cfg_profile_list_url\n        payload = {}\n\n        try:\n            res = self._send_request('GET', url, payload, 'config-profile')\n            if res and res.status_code in self._resp_ok:\n                return res.json()\n        except dexc.DfaClientRequestFailed:\n            LOG.error(_LE(\"Failed to send request to DCNM.\"))\n\n    def _get_settings(self):\n        \"\"\"Get global mobility domain from DCNM.\"\"\"\n        url = self._global_settings_url\n        payload = {}\n        res = self._send_request('GET', url, payload, 'settings')\n        if res and res.status_code in self._resp_ok:\n            return res.json()\n\n    def _set_default_mobility_domain(self):\n        settings = self._get_settings()\n        LOG.info(_LI(\"settings is %s\") % settings)\n\n        if ('globalMobilityDomain' in settings.keys()):\n            global_md = settings.get('globalMobilityDomain')\n            self._default_md = global_md.get('name')\n            LOG.info(_LI(\"setting default md to be %s\") % self._default_md)\n        
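# DCNM settings did not report a global mobility domain; fall back to a fixed name\n        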
else:\n            self._default_md = \"md0\"\n\n    def _create_org(self, orch_id, name, desc):\n        \"\"\"Create organization on the DCNM.\n\n        :param orch_id: orchestrator ID\n        :param name: Name of organization\n        :param desc: Description of organization\n        \"\"\"\n        url = self._org_url\n        payload = {\n            \"organizationName\": name,\n            \"description\": name if len(desc) == 0 else desc,\n            \"orchestrationSource\": orch_id}\n\n        return self._send_request('POST', url, payload, 'organization')\n\n    def _create_or_update_partition(self, org_name, part_name, desc,\n                                    dci_id=UNKNOWN_DCI_ID, vrf_prof=None,\n                                    service_node_ip=UNKNOWN_SRVN_NODE_IP,\n                                    operation='POST'):\n        \"\"\"Send create or update partition request to the DCNM.\n\n        :param org_name: name of organization\n        :param part_name: name of partition\n        :param desc: description of partition\n        :dci_id: DCI ID for inter-DC\n        :vrf_prof: VRF Profile Name\n        :service_node_ip: Service Node's Address\n        \"\"\"\n        if part_name is None:\n            part_name = self._part_name\n        if vrf_prof is None or dci_id == UNKNOWN_DCI_ID or (\n           service_node_ip == UNKNOWN_SRVN_NODE_IP):\n            part_info = self._get_partition(org_name, part_name)\n            if vrf_prof is None:\n                vrf_prof = self.get_partition_vrfProf(org_name, part_name,\n                                                      part_info=part_info)\n            if dci_id == UNKNOWN_DCI_ID:\n                dci_id = self.get_partition_dciId(org_name, part_name,\n                                                  part_info=part_info)\n            if service_node_ip == UNKNOWN_SRVN_NODE_IP:\n                service_node_ip = self.get_partition_serviceNodeIp(\n                    org_name, part_name, part_info=part_info)\n        url = ((self._create_part_url % (org_name)) if operation == 'POST' else\n               self._update_part_url % (org_name, part_name))\n\n        payload = {\n            \"partitionName\": part_name,\n            \"description\": part_name if len(desc) == 0 else desc,\n            \"serviceNodeIpAddress\": service_node_ip,\n            \"organizationName\": org_name}\n\n        # Check the DCNM version to determine whether the newer version needs\n        # extra payload when creating/updating a partition.\n        if self._is_iplus:\n            # Need to add extra payload for the new version.\n            enable_dci = \"true\" if dci_id and int(dci_id) != 0 else \"false\"\n            extra_payload = {\n                \"vrfProfileName\": vrf_prof,\n                \"vrfName\": ':'.join((org_name, part_name)),\n                \"dciId\": dci_id,\n                \"enableDCIExtension\": enable_dci}\n            payload.update(extra_payload)\n\n        return self._send_request(operation, url, payload, 'partition')\n\n    def _get_partition(self, org_name, part_name=None):\n        \"\"\"Send get partition request to the DCNM.\n\n        :param org_name: name of organization\n        :param part_name: name of partition\n        \"\"\"\n        if part_name is None:\n            part_name = self._part_name\n        url = self._update_part_url % (org_name, part_name)\n        res = self._send_request(\"GET\", url, '', 'partition')\n        if res and res.status_code in self._resp_ok:\n            return res.json()\n\n    def update_partition_static_route(self, org_name, part_name,\n                                      static_ip_list, vrf_prof=None,\n                                      service_node_ip=None):\n        \"\"\"Send static route update requests to DCNM.\n\n        :param org_name: name of organization\n        :param part_name: name of partition\n        :static_ip_list: List of static IP addresses\n        :vrf_prof: VRF Profile\n        :service_node_ip: Service Node IP address\n        \"\"\"\n        if part_name is None:\n            part_name = self._part_name\n        if vrf_prof is None:\n            vrf_prof = self.default_vrf_profile\n        operation = 'PUT'\n        url = (self._update_part_url % (org_name, part_name))\n        ip_str = ''\n        ip_cnt = 0\n        for ip in static_ip_list:\n            ip_sub = \"$n0\" + str(ip_cnt) + \"=\" + str(ip) + \";\"\n            ip_str = ip_str + ip_sub\n            ip_cnt = ip_cnt + 1\n        
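# e.g. static_ip_list=['10.1.0.0/16', '10.2.0.0/16'] (illustrative values) yields\n        # ip_str == \"$n00=10.1.0.0/16;$n01=10.2.0.0/16;\", which is appended to configArg\n        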
cfg_args = (\"$vrfName=\" + org_name + ':' + part_name + \";\"\n                    \"$include_serviceNodeIpAddress=\" + service_node_ip + \";\"\n                    + ip_str)  # one profile-argument string; assumes service_node_ip is passed in as a string\n        payload = {\n            \"partitionName\": part_name,\n            \"organizationName\": org_name,\n            \"dciExtensionStatus\": \"Not configured\",\n            \"vrfProfileName\": vrf_prof,\n            \"vrfName\": ':'.join((org_name, part_name)),\n            \"configArg\": cfg_args}\n\n        res = self._send_request(operation, url, payload, 'partition')\n        return (res is not None and res.status_code in self._resp_ok)\n\n    def _delete_org(self, org_name):\n        \"\"\"Send organization delete request to DCNM.\n\n        :param org_name: name of organization to be deleted\n        \"\"\"\n        url = self._del_org_url % (org_name)\n        return self._send_request('DELETE', url, '', 'organization')\n\n    def _delete_partition(self, org_name, partition_name):\n        \"\"\"Send partition delete request to DCNM.\n\n        :param org_name: name of organization\n        :param partition_name: name of partition\n        \"\"\"\n        url = self._del_part % (org_name, partition_name)\n        return self._send_request('DELETE', url, '', 'partition')\n\n    def _delete_network(self, network_info):\n        \"\"\"Send network delete request to DCNM.\n\n        :param network_info: contains network info to be deleted.\n        \"\"\"\n        org_name = network_info.get('organizationName', '')\n        part_name = network_info.get('partitionName', '')\n        segment_id = network_info['segmentId']\n        if 'mobDomainName' in network_info:\n            vlan_id = network_info['vlanId']\n            mob_dom_name = network_info['mobDomainName']\n            url = self._network_mob_url % (org_name, part_name, vlan_id,\n                                           mob_dom_name)\n        else:\n            url = self._network_url % (org_name, part_name, segment_id)\n        return self._send_request('DELETE', url, '', 'network')\n\n    def _get_network(self, network_info):\n        \"\"\"Send network get request to DCNM.\n\n        :param network_info: contains network info to query.\n        \"\"\"\n        org_name = network_info.get('organizationName', '')\n        part_name = network_info.get('partitionName', '')\n        segment_id = network_info['segmentId']\n        url = self._network_url % (org_name, part_name, segment_id)\n        return self._send_request('GET', url, '', 'network')\n\n    def _login_request(self, url_login):\n        \"\"\"Internal function to send login request.\"\"\"\n\n        expiration_time = self._exp_time\n        payload = {'expirationTime': expiration_time}\n        # TODO(padkrish), after testing with certificates, make the\n        # verify option configurable.\n        res = requests.post(url_login,\n                            data=jsonutils.dumps(payload),\n                            headers=self._req_headers,\n                            auth=(self._user, self._pwd),\n                            timeout=self.timeout_resp, verify=False)\n        session_id = ''\n        if res and res.status_code in self._resp_ok:\n            session_id = res.json().get('Dcnm-Token')\n        self._req_headers.update({'Dcnm-Token': session_id})\n\n    def _login(self):\n        \"\"\"Login request to DCNM.\"\"\"\n\n        self._login_request(self._login_url)\n\n    def _logout_request(self, url_logout):\n        \"\"\"Internal logout request to DCNM. 
\"\"\"\n\n requests.post(url_logout,\n headers=self._req_headers,\n timeout=self.timeout_resp, verify=False)\n\n def _logout(self, url_arg=None):\n \"\"\"Logout request to DCNM.\"\"\"\n\n self._logout_request(self._logout_url)\n\n def _send_request(self, operation, url, payload, desc):\n \"\"\"Send request to DCNM.\"\"\"\n\n res = None\n try:\n payload_json = None\n if payload and payload != '':\n payload_json = jsonutils.dumps(payload)\n self._login()\n desc_lookup = {'POST': ' creation', 'PUT': ' update',\n 'DELETE': ' deletion', 'GET': ' get'}\n\n res = requests.request(operation, url, data=payload_json,\n headers=self._req_headers,\n timeout=self.timeout_resp, verify=False)\n desc += desc_lookup.get(operation, operation.lower())\n LOG.info(_LI(\"DCNM-send_request: %(desc)s %(url)s %(pld)s\"),\n {'desc': desc, 'url': url, 'pld': payload})\n\n self._logout()\n except (requests.HTTPError, requests.Timeout,\n requests.ConnectionError) as exc:\n LOG.exception(_LE('Error during request: %s'), exc)\n raise dexc.DfaClientRequestFailed(reason=exc)\n\n return res\n\n def config_profile_list(self):\n \"\"\"Return config profile list from DCNM.\"\"\"\n\n these_profiles = self._config_profile_list() or []\n profile_list = [q for p in these_profiles for q in\n [p.get('profileName')]]\n return profile_list\n\n def config_profile_fwding_mode_get(self, profile_name):\n \"\"\"Return forwarding mode of given config profile.\"\"\"\n\n profile_params = self._config_profile_get(profile_name)\n fwd_cli = 'fabric forwarding mode proxy-gateway'\n if profile_params and fwd_cli in profile_params['configCommands']:\n return 'proxy-gateway'\n else:\n return 'anycast-gateway'\n\n def get_config_profile_for_network(self, net_name):\n \"\"\"Get the list of profiles.\"\"\"\n\n cfgplist = self.config_profile_list()\n cfgname = net_name.partition(':')[2]\n\n cfgtuple = set()\n for cfg_prof in cfgplist:\n if cfg_prof.startswith('defaultNetwork'):\n cfg_alias = (cfg_prof.split('defaultNetwork')[1].\n split('Profile')[0])\n elif cfg_prof.endswith('Profile'):\n cfg_alias = cfg_prof.split('Profile')[0]\n else:\n cfg_alias = cfg_prof\n cfgtuple.update([(cfg_prof, cfg_alias)])\n cfgp = [a for a, b in cfgtuple if cfgname == b]\n prof = cfgp[0] if cfgp else self.default_cfg_profile\n fwd_mod = self.config_profile_fwding_mode_get(prof)\n return (prof, fwd_mod)\n\n def create_network(self, tenant_name, network, subnet,\n dhcp_range=True):\n \"\"\"Create network on the DCNM.\n\n :param tenant_name: name of tenant the network belongs to\n :param network: network parameters\n :param subnet: subnet parameters of the network\n \"\"\"\n seg_id = str(network.segmentation_id)\n subnet_ip_mask = subnet.cidr.split('/')\n gw_ip = subnet.gateway_ip\n cfg_args = [\n \"$segmentId=\" + seg_id,\n \"$netMaskLength=\" + subnet_ip_mask[1],\n \"$gatewayIpAddress=\" + gw_ip,\n \"$networkName=\" + network.name,\n \"$vlanId=0\",\n \"$vrfName=\" + tenant_name + ':' + self._part_name\n ]\n cfg_args = ';'.join(cfg_args)\n\n ip_range = ','.join([\"%s-%s\" % (p['start'], p['end']) for p in\n subnet.allocation_pools])\n\n dhcp_scopes = {'ipRange': ip_range,\n 'subnet': subnet.cidr,\n 'gateway': gw_ip}\n\n network_info = {\"segmentId\": seg_id,\n \"vlanId\": \"0\",\n \"mobilityDomainId\": \"None\",\n \"profileName\": network.config_profile,\n \"networkName\": network.name,\n \"configArg\": cfg_args,\n \"organizationName\": tenant_name,\n \"partitionName\": self._part_name,\n \"description\": network.name,\n \"netmaskLength\": subnet_ip_mask[1],\n \"gateway\": 
gw_ip}\n if dhcp_range:\n network_info[\"dhcpScope\"] = dhcp_scopes\n\n if self._is_iplus:\n # Need to add the vrf name to the network info\n prof = self._config_profile_get(network.config_profile)\n if prof and prof.get('profileSubType') == 'network:universal':\n # For universal profile vrf has to be organization:partition\n network_info[\"vrfName\"] = ':'.join((tenant_name,\n self._part_name))\n else:\n # Otherwise, it should be left empty.\n network_info[\"vrfName\"] = \"\"\n\n LOG.info(_LI(\"Creating %s network in DCNM.\"), network_info)\n\n res = self._create_network(network_info)\n if res and res.status_code in self._resp_ok:\n LOG.info(_LI(\"Created %s network in DCNM.\"), network_info)\n else:\n LOG.error(_LE(\"Failed to create %s network in DCNM.\"),\n network_info)\n raise dexc.DfaClientRequestFailed(reason=res)\n\n def create_service_network(self, tenant_name, network, subnet,\n dhcp_range=True):\n \"\"\"Create service network on the DCNM.\n\n :param tenant_name: name of tenant the network belongs to\n :param network: network parameters\n :param subnet: subnet parameters of the network\n \"\"\"\n network_info = {}\n subnet_ip_mask = subnet.cidr.split('/')\n if self._default_md is None:\n self._set_default_mobility_domain()\n vlan_id = '0'\n gw_ip = subnet.gateway_ip\n part_name = network.part_name\n if not part_name:\n part_name = self._part_name\n\n if network.vlan_id:\n vlan_id = str(network.vlan_id)\n if network.mob_domain_name is not None:\n mob_domain_name = network.mob_domain_name\n else:\n mob_domain_name = self._default_md\n else:\n mob_domain_name = None\n\n seg_id = str(network.segmentation_id)\n seg_str = \"$segmentId=\" + seg_id\n cfg_args = [\n seg_str,\n \"$netMaskLength=\" + subnet_ip_mask[1],\n \"$gatewayIpAddress=\" + gw_ip,\n \"$networkName=\" + network.name,\n \"$vlanId=\" + vlan_id,\n \"$vrfName=\" + tenant_name + ':' + part_name\n ]\n cfg_args = ';'.join(cfg_args)\n\n ip_range = ','.join([\"%s-%s\" % (p['start'], p['end']) for p in\n subnet.allocation_pools])\n\n dhcp_scopes = {'ipRange': ip_range,\n 'subnet': subnet.cidr,\n 'gateway': gw_ip}\n\n network_info = {\"vlanId\": vlan_id,\n \"mobilityDomainId\": mob_domain_name,\n \"profileName\": network.config_profile,\n \"networkName\": network.name,\n \"configArg\": cfg_args,\n \"organizationName\": tenant_name,\n \"partitionName\": part_name,\n \"description\": network.name,\n \"netmaskLength\": subnet_ip_mask[1],\n \"gateway\": gw_ip}\n if seg_id:\n network_info[\"segmentId\"] = seg_id\n if dhcp_range:\n network_info[\"dhcpScope\"] = dhcp_scopes\n if hasattr(subnet, 'secondary_gw'):\n network_info[\"secondaryGateway\"] = subnet.secondary_gw\n if self._is_iplus:\n # Need to add the vrf name to the network info\n prof = self._config_profile_get(network.config_profile)\n if prof and prof.get('profileSubType') == 'network:universal':\n # For universal profile vrf has to be organization:partition\n network_info[\"vrfName\"] = ':'.join((tenant_name, part_name))\n else:\n # Otherwise, it should be left empty.\n network_info[\"vrfName\"] = \"\"\n\n LOG.info(_LI(\"Creating %s network in DCNM.\"), network_info)\n\n res = self._create_network(network_info)\n if res and res.status_code in self._resp_ok:\n LOG.info(_LI(\"Created %s network in DCNM.\"), network_info)\n else:\n LOG.error(_LE(\"Failed to create %s network in DCNM.\"),\n network_info)\n raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))\n\n def delete_network(self, tenant_name, network):\n \"\"\"Delete network on the DCNM.\n\n :param tenant_name: name of 
tenant the network belongs to\n :param network: object that contains network parameters\n \"\"\"\n seg_id = network.segmentation_id\n network_info = {\n 'organizationName': tenant_name,\n 'partitionName': self._part_name,\n 'segmentId': seg_id,\n }\n LOG.debug(\"Deleting %s network in DCNM.\", network_info)\n\n res = self._delete_network(network_info)\n if res and res.status_code in self._resp_ok:\n LOG.debug(\"Deleted %s network in DCNM.\", network_info)\n else:\n LOG.error(_LE(\"Failed to delete %s network in DCNM.\"),\n network_info)\n raise dexc.DfaClientRequestFailed(reason=res)\n\n def delete_service_network(self, tenant_name, network):\n \"\"\"Delete service network on the DCNM.\n\n :param tenant_name: name of tenant the network belongs to\n :param network: object that contains network parameters\n \"\"\"\n\n network_info = {}\n part_name = network.part_name\n if not part_name:\n part_name = self._part_name\n seg_id = str(network.segmentation_id)\n if network.vlan:\n vlan_id = str(network.vlan)\n if network.mob_domain_name is not None:\n mob_domain_name = network.mob_domain_name\n else:\n # The current way will not work since _default_md is obtained\n # during create_service_network. It's preferable to get it\n # during init TODO(padkrish)\n if self._default_md is None:\n self._set_default_mobility_domain()\n mob_domain_name = self._default_md\n network_info = {\n 'organizationName': tenant_name,\n 'partitionName': part_name,\n 'mobDomainName': mob_domain_name,\n 'vlanId': vlan_id,\n 'segmentId': seg_id,\n }\n else:\n network_info = {\n 'organizationName': tenant_name,\n 'partitionName': part_name,\n 'segmentId': seg_id,\n }\n LOG.debug(\"Deleting %s network in DCNM.\", network_info)\n\n res = self._delete_network(network_info)\n if res and res.status_code in self._resp_ok:\n LOG.debug(\"Deleted %s network in DCNM.\", network_info)\n else:\n LOG.error(_LE(\"Failed to delete %s network in DCNM.\"),\n network_info)\n raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))\n\n def delete_project(self, tenant_name, part_name):\n \"\"\"Delete project on the DCNM.\n\n :param tenant_name: name of project.\n :param part_name: name of partition.\n \"\"\"\n res = self._delete_partition(tenant_name, part_name)\n if res and res.status_code in self._resp_ok:\n LOG.debug(\"Deleted %s partition in DCNM.\", part_name)\n else:\n LOG.error(_LE(\"Failed to delete %(part)s partition in DCNM. \"\n \"Response: %(res)s\"), {'part': part_name, 'res': res})\n raise dexc.DfaClientRequestFailed(reason=res)\n\n res = self._delete_org(tenant_name)\n if res and res.status_code in self._resp_ok:\n LOG.debug(\"Deleted %s organization in DCNM.\", tenant_name)\n else:\n LOG.error(_LE(\"Failed to delete %(org)s organization in DCNM. \"\n \"Response: %(res)s\"), {'org': tenant_name, 'res': res})\n raise dexc.DfaClientRequestFailed(reason=res)\n\n def delete_partition(self, org_name, partition_name):\n \"\"\"Send partition delete request to DCNM.\n\n :param org_name: name of organization\n :param partition_name: name of partition to be deleted\n \"\"\"\n res = self._delete_partition(org_name, partition_name)\n if res and res.status_code in self._resp_ok:\n LOG.debug(\"Deleted %s partition in DCNM.\", partition_name)\n else:\n LOG.error(_LE(\"Failed to delete %(part)s partition in DCNM. \"\n \"Response: %(res)s\"),\n ({'part': partition_name, 'res': res}))\n raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))\n\n def create_project(self, orch_id, org_name, part_name, dci_id, desc=None):\n \"\"\"Create project on the DCNM.\n\n :param 
orch_id: orchestrator ID\n :param org_name: name of organization.\n :param part_name: name of partition.\n :param dci_id: Data Center interconnect id.\n :param desc: description of project.\n \"\"\"\n desc = desc or org_name\n res = self._create_org(orch_id, org_name, desc)\n if res and res.status_code in self._resp_ok:\n LOG.debug(\"Created %s organization in DCNM.\", org_name)\n else:\n LOG.error(_LE(\"Failed to create %(org)s organization in DCNM. \"\n \"Response: %(res)s\"), {'org': org_name, 'res': res})\n raise dexc.DfaClientRequestFailed(reason=res)\n\n self.create_partition(org_name, part_name, dci_id,\n self.default_vrf_profile, desc=desc)\n\n def update_project(self, org_name, part_name, dci_id=UNKNOWN_DCI_ID,\n service_node_ip=UNKNOWN_SRVN_NODE_IP,\n vrf_prof=None, desc=None):\n \"\"\"Update project on the DCNM.\n\n :param org_name: name of organization.\n :param part_name: name of partition.\n :param dci_id: Data Center interconnect id.\n :param service_node_ip: service node IP address.\n :param vrf_prof: VRF profile name.\n :param desc: description of project.\n \"\"\"\n desc = desc or org_name\n res = self._create_or_update_partition(org_name, part_name, desc,\n dci_id=dci_id,\n service_node_ip=service_node_ip,\n vrf_prof=vrf_prof,\n operation='PUT')\n if res and res.status_code in self._resp_ok:\n LOG.debug(\"Updated %s partition in DCNM.\", part_name)\n else:\n LOG.error(_LE(\"Failed to update %(part)s partition in DCNM. \"\n \"Response: %(res)s\"), {'part': part_name, 'res': res})\n raise dexc.DfaClientRequestFailed(reason=res)\n\n def create_partition(self, org_name, part_name, dci_id, vrf_prof,\n service_node_ip=None, desc=None):\n \"\"\"Create partition on the DCNM.\n\n :param org_name: name of organization to be created\n :param part_name: name of partition to be created\n :param dci_id: DCI ID\n :param vrf_prof: VRF profile for the partition\n :param service_node_ip: Specifies the Default route IP address.\n :param desc: string that describes organization\n \"\"\"\n desc = desc or org_name\n res = self._create_or_update_partition(org_name, part_name,\n desc, dci_id=dci_id,\n service_node_ip=service_node_ip,\n vrf_prof=vrf_prof)\n if res and res.status_code in self._resp_ok:\n LOG.debug(\"Created %s partition in DCNM.\", part_name)\n else:\n LOG.error(_LE(\"Failed to create %(part)s partition in DCNM. \"\n \"Response: %(res)s\"), ({'part': part_name, 'res': res}))\n raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))\n\n def get_partition_vrfProf(self, org_name, part_name=None, part_info=None):\n \"\"\"Get VRF Profile for the partition from the DCNM.\n\n :param org_name: name of organization\n :param part_name: name of partition\n \"\"\"\n vrf_profile = None\n if part_info is None:\n part_info = self._get_partition(org_name, part_name)\n LOG.info(_LI(\"query result from dcnm for partition info is %s\"),\n part_info)\n if part_info is not None and \"vrfProfileName\" in part_info:\n vrf_profile = part_info.get(\"vrfProfileName\")\n return vrf_profile\n\n def get_partition_dciId(self, org_name, part_name, part_info=None):\n \"\"\"Get DCI ID for the partition.\n\n :param org_name: name of organization\n :param part_name: name of partition\n \"\"\"\n if part_info is None:\n part_info = self._get_partition(org_name, part_name)\n LOG.info(_LI(\"query result from dcnm for partition info is %s\"),\n part_info)\n if part_info is not None and \"dciId\" in part_info:\n return part_info.get(\"dciId\")\n\n def get_partition_serviceNodeIp(self, org_name, part_name, part_info=None):\n \"\"\"Get Service Node IP address from the DCNM.\n\n :param org_name: name of organization\n :param 
part_name: name of partition\n \"\"\"\n if part_info is None:\n part_info = self._get_partition(org_name, part_name)\n LOG.info(_LI(\"query result from dcnm for partition info is %s\"),\n part_info)\n if part_info is not None and \"serviceNodeIpAddress\" in part_info:\n return part_info.get(\"serviceNodeIpAddress\")\n\n def get_partition_segmentId(self, org_name, part_name, part_info=None):\n \"\"\"Get partition Segment ID from the DCNM.\n\n :param org_name: name of organization\n :param part_name: name of partition\n \"\"\"\n if part_info is None:\n part_info = self._get_partition(org_name, part_name)\n LOG.info(_LI(\"query result from dcnm for partition info is %s\"),\n part_info)\n if part_info is not None and \"partitionSegmentId\" in part_info:\n return part_info.get(\"partitionSegmentId\")\n\n def list_networks(self, org, part):\n \"\"\"Return list of networks from DCNM.\n\n :param org: name of organization.\n :param part: name of partition.\n \"\"\"\n if org and part:\n list_url = self._del_part + '/networks'\n list_url = list_url % (org, part)\n res = self._send_request('GET', list_url, '', 'networks')\n if res and res.status_code in self._resp_ok:\n return res.json()\n\n def list_organizations(self):\n \"\"\"Return list of organizations from DCNM.\"\"\"\n\n try:\n res = self._send_request('GET', self._org_url, '', 'organizations')\n if res and res.status_code in self._resp_ok:\n return res.json()\n except dexc.DfaClientRequestFailed:\n LOG.error(_LE(\"Failed to send request to DCNM.\"))\n\n def get_network(self, org, segid):\n \"\"\"Return given network from DCNM.\n\n :param org: name of organization.\n :param segid: segmentation id of the network.\n \"\"\"\n network_info = {\n 'organizationName': org,\n 'partitionName': self._part_name,\n 'segmentId': segid,\n }\n res = self._get_network(network_info)\n if res and res.status_code in self._resp_ok:\n return res.json()\n\n def get_version(self):\n \"\"\"Get the DCNM version.\"\"\"\n\n url = '%s://%s/rest/dcnm-version' % (self.dcnm_protocol, self._ip)\n payload = {}\n\n try:\n res = self._send_request('GET', url, payload, 'dcnm-version')\n if res and res.status_code in self._resp_ok:\n return res.json().get('Dcnm-Version')\n except dexc.DfaClientRequestFailed as exc:\n LOG.error(_LE(\"Failed to get DCNM version.\"))\n sys.exit(_LE(\"ERROR: Failed to connect to DCNM: %s\") % exc)\n\n def _verify_protocol(self, protocol):\n \"\"\"Check that login and logout succeed over the given protocol.\"\"\"\n try:\n self._login_request(\"%s://%s/rest/logon\" % (protocol, self._ip))\n self._logout_request(\"%s://%s/rest/logout\" % (protocol, self._ip))\n except (requests.HTTPError, requests.Timeout,\n requests.ConnectionError) as exc:\n LOG.error(_LE(\"Login Test failed for %(protocol)s Exc %(exc)s.\"),\n {'protocol': protocol, 'exc': exc})\n return False\n return True\n\n def get_dcnm_protocol(self):\n \"\"\"Routine to find out if DCNM is using http or https.\n\n DCNM 10 (Fuji-4) and above does not support http. Only https is\n supported and enabled by default.\n Prior DCNM versions supported both http and https. But, only http\n was enabled by default.\n So the enabler needs to find out whether DCNM supports http or https\n in order to stay compatible with existing installed setups.\n \"\"\"\n if self._verify_protocol('https'):\n return 'https'\n if self._verify_protocol('http'):\n return 'http'\n sys.exit(_LE(\"ERROR: Both http and https tests failed\"))\n\n def _build_url(self, url_remaining):\n \"\"\"This function builds the URL from host, protocol and string. 
\"\"\"\n return self.host_protocol_url + url_remaining\n\n def fill_urls(self):\n \"\"\"This assigns the URL's based on the protocol. \"\"\"\n\n protocol = self.dcnm_protocol\n self._org_url = '%s://%s/rest/auto-config/organizations' % (\n (protocol, self._ip))\n self._create_network_url = ('%s://%s/' % (protocol, self._ip) +\n 'rest/auto-config/organizations'\n '/%s/partitions/%s/networks')\n self.host_protocol_url = '%s://%s/' % (protocol, self._ip)\n self._create_network_url = self._build_url(\n 'rest/auto-config/organizations'\n '/%s/partitions/%s/networks')\n self._cfg_profile_list_url = '%s://%s/rest/auto-config/profiles' % (\n (protocol, self._ip))\n self._cfg_profile_get_url = self._cfg_profile_list_url + '/%s'\n self._global_settings_url = self._build_url(\n 'rest/auto-config/settings')\n self._create_part_url = self._build_url(\n 'rest/auto-config/organizations/%s/partitions')\n self._update_part_url = self._build_url(\n 'rest/auto-config/organizations/%s/partitions/%s')\n self._del_org_url = self._build_url(\n 'rest/auto-config/organizations/%s')\n self._del_part = self._build_url(\n 'rest/auto-config/organizations/%s/partitions/%s')\n self._network_url = self._build_url(\n 'rest/auto-config/organizations/%s/partitions/'\n '%s/networks/segment/%s')\n self._network_mob_url = self._build_url(\n 'rest/auto-config/organizations/%s/partitions/'\n '%s/networks/vlan/%s/mobility-domain/%s')\n self._segmentid_ranges_url = self._build_url(\n 'rest/settings/segmentid-ranges')\n self._login_url = self._build_url('rest/logon')\n self._logout_url = self._build_url('rest/logout')\n", "sub_path": "networking_cisco/apps/saf/server/cisco_dfa_rest.py", "file_name": "cisco_dfa_rest.py", "file_ext": "py", "file_size_in_byte": 39636, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "networking_cisco.apps.saf.common.dfa_logger.getLogger", "line_number": 33, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_logger", "line_number": 33, "usage_type": "name"}, {"api_name": "requests.codes", "line_number": 63, "usage_type": "attribute"}, {"api_name": "requests.codes", "line_number": 64, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 81, "usage_type": "call"}, {"api_name": "re.match", "line_number": 82, "usage_type": "call"}, {"api_name": "re.match", "line_number": 83, "usage_type": "call"}, {"api_name": "networking_cisco._i18n._LI", "line_number": 93, "usage_type": "call"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 118, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 121, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 121, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 132, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 135, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 135, "usage_type": "name"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 150, "usage_type": "attribute"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 150, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 151, "usage_type": "call"}, {"api_name": "networking_cisco._i18n._LI", "line_number": 
163, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 188, "usage_type": "attribute"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 188, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 189, "usage_type": "call"}, {"api_name": "networking_cisco._i18n._LI", "line_number": 201, "usage_type": "call"}, {"api_name": "networking_cisco._i18n._LI", "line_number": 206, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 380, "usage_type": "call"}, {"api_name": "oslo_serialization.jsonutils.dumps", "line_number": 381, "usage_type": "call"}, {"api_name": "oslo_serialization.jsonutils", "line_number": 381, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 398, "usage_type": "call"}, {"api_name": "oslo_serialization.jsonutils.dumps", "line_number": 414, "usage_type": "call"}, {"api_name": "oslo_serialization.jsonutils", "line_number": 414, "usage_type": "name"}, {"api_name": "requests.request", "line_number": 419, "usage_type": "call"}, {"api_name": "networking_cisco._i18n._LI", "line_number": 423, "usage_type": "call"}, {"api_name": "requests.HTTPError", "line_number": 427, "usage_type": "attribute"}, {"api_name": "requests.Timeout", "line_number": 427, "usage_type": "attribute"}, {"api_name": "requests.ConnectionError", "line_number": 428, "usage_type": "attribute"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 429, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 430, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 430, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LI", "line_number": 526, "usage_type": "call"}, {"api_name": "networking_cisco._i18n._LI", "line_number": 530, "usage_type": "call"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 532, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 534, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 534, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LI", "line_number": 608, "usage_type": "call"}, {"api_name": "networking_cisco._i18n._LI", "line_number": 612, "usage_type": "call"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 614, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 616, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 616, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 636, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 638, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 638, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 682, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 684, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 684, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 696, "usage_type": "call"}, {"api_name": 
"networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 698, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 698, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 704, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 706, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 706, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 717, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 720, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 720, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 736, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 738, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 738, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 762, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 764, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 764, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 785, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 787, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 787, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LI", "line_number": 798, "usage_type": "call"}, {"api_name": "networking_cisco._i18n._LI", "line_number": 812, "usage_type": "call"}, {"api_name": "networking_cisco._i18n._LI", "line_number": 825, "usage_type": "call"}, {"api_name": "networking_cisco._i18n._LI", "line_number": 838, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 863, "usage_type": "attribute"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 863, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 864, "usage_type": "call"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions.DfaClientRequestFailed", "line_number": 891, "usage_type": "attribute"}, {"api_name": "networking_cisco.apps.saf.common.dfa_exceptions", "line_number": 891, "usage_type": "name"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 892, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 893, "usage_type": "call"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 893, "usage_type": "call"}, {"api_name": "requests.HTTPError", "line_number": 899, "usage_type": "attribute"}, {"api_name": "requests.Timeout", "line_number": 899, "usage_type": "attribute"}, {"api_name": "requests.ConnectionError", "line_number": 900, "usage_type": "attribute"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 901, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 920, "usage_type": "call"}, {"api_name": "networking_cisco._i18n._LE", "line_number": 920, "usage_type": "call"}]} {"seq_id": "340281450", "text": "#%%\nimport os \nimport numpy as 
np \nimport theano \nimport theano.tensor as T\nimport matplotlib.pyplot as plt \n#%%\n# Data and normalization \ninputs = np.array([[3,5],[5,1],[10,2]]).astype(\"float32\")\noutputs = np.array([[75],[82],[93]]).astype(\"float32\")\ninputs = inputs / np.amax(inputs, axis = 0)\noutputs = outputs / 100\n#%%\n# hyper parameters and weights\ninputLayerSize = 2\nhiddenLayerSize = 3\noutputLayerSize = 1\nW1 = theano.shared((np.random.randn(inputLayerSize,hiddenLayerSize)).astype(\"float32\"), name = \"W1\")\nb1 = theano.shared( np.zeros(hiddenLayerSize).astype(\"float32\") , name = \"b1\")\nW2 = theano.shared((np.random.randn(hiddenLayerSize,outputLayerSize)).astype(\"float32\"), name = \"W2\")\nb2 = theano.shared( np.zeros(outputLayerSize).astype(\"float32\") , name = \"b2\")\n#%%\n# Forward propagation \nX = T.matrix(\"X\") \nz1 = T.dot(X,W1) + b1\na1 = T.nnet.sigmoid(z1)\nz2 = T.dot(a1,W2) + b2\n# using ReLU improves the results a lot \ny_hat = T.nnet.relu(z2)\nforward = theano.function([X], y_hat)\n#%%\n# cost function, gradient and optimizer\nepsilon = 0.01\ny = T.fcol(\"y\")\nloss = 0.5 * ((y - y_hat)**2).sum()\ncalloss = theano.function([X,y], loss)\n# gradient \ndW1, dW2 = T.grad(loss, [W1,W2])\ndb1, db2 = T.grad(loss,[b1,b2])\n# optimizer \n#%% \ntrain = theano.function(\n inputs = [X,y],\n outputs = [y_hat,loss],\n updates = [\n [W2, W2 - epsilon * dW2],\n [W1, W1 - epsilon * dW1],\n [b2, b2 - epsilon * db2],\n [b1, b1 - epsilon * db1]\n ]\n )\n#%%\ncost = []\nfor i in range(20000):\n pred, cost_iter = train(inputs, outputs)\n cost.append(cost_iter)\nplt.plot(cost)\nprint (pred,\"\\n\" ,outputs)\n \n \n \n \n \n \n \n \n \n \n \n ", "sub_path": "example2/neural2.py", "file_name": "neural2.py", "file_ext": "py", "file_size_in_byte": 1750, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 11, "usage_type": "call"}, {"api_name": "theano.shared", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "attribute"}, {"api_name": "theano.shared", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "theano.shared", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 20, "usage_type": "attribute"}, {"api_name": "theano.shared", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 21, "usage_type": "call"}, {"api_name": "theano.tensor.matrix", "line_number": 24, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 24, "usage_type": "name"}, {"api_name": "theano.tensor.dot", "line_number": 25, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 25, "usage_type": "name"}, {"api_name": "theano.tensor.nnet.sigmoid", "line_number": 26, "usage_type": "call"}, {"api_name": "theano.tensor.nnet", "line_number": 26, "usage_type": "attribute"}, {"api_name": "theano.tensor", "line_number": 26, "usage_type": "name"}, {"api_name": "theano.tensor.dot", "line_number": 27, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 27, "usage_type": "name"}, {"api_name": "theano.tensor.nnet.relu", 
"line_number": 29, "usage_type": "call"}, {"api_name": "theano.tensor.nnet", "line_number": 29, "usage_type": "attribute"}, {"api_name": "theano.tensor", "line_number": 29, "usage_type": "name"}, {"api_name": "theano.function", "line_number": 30, "usage_type": "call"}, {"api_name": "theano.tensor.fcol", "line_number": 34, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 34, "usage_type": "name"}, {"api_name": "theano.function", "line_number": 36, "usage_type": "call"}, {"api_name": "theano.tensor.grad", "line_number": 38, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 38, "usage_type": "name"}, {"api_name": "theano.tensor.grad", "line_number": 39, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 39, "usage_type": "name"}, {"api_name": "theano.function", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}]} {"seq_id": "540740821", "text": "\"\"\"Module with Scenario entity.\"\"\"\n\nfrom datetime import datetime\nfrom collections import namedtuple\n\nfrom sqlalchemy import and_\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm import Query\nfrom sqlalchemy.sql.expression import func\n\nfrom autostorage.database.model.scenario.scenario import Scenario as ScenarioRecord\nfrom autostorage.database.model.scenario.view_state import (\n ScenarioViewState as ScenarioViewStateRecord,\n )\nfrom autostorage.database.model.scenario.structure_state import (\n ScenarioStructureState as ScenarioStructureStateRecord,\n ScenarioStructureLock as ScenarioStructureLockRecord,\n )\n\nfrom autostorage.core._base import _BaseClient\nfrom autostorage.core.param_spec import Pagination\n\n\nclass ScenarioStructureLockedError(Exception):\n \"\"\"Exception to prevent simultanious changes of the scenario.\"\"\"\n\n\nclass Scenario(_BaseClient):\n \"\"\"Class to work with autostorage Scenario.\"\"\"\n\n def __init__(self, base, scenario_id):\n super(Scenario, self).__init__(base)\n self.__id = scenario_id\n\n @property\n def scenario_id(self):\n \"\"\"ID of the scenario.\"\"\"\n return self.__id\n\n def get_view_states(self, pagination=None):\n \"\"\"Get view states of the scenario.\n\n View states are ordered from newest to oldest.\n\n :param pagination: instance of :class:`Pagination `.\n :returns: list of instances :class:`ScenarioViewState`.\n \"\"\"\n with self.base.get_session() as session:\n scenario = session.query(ScenarioRecord).filter_by(scenario_id=self.__id).one()\n states_query = scenario.view_states\n if pagination:\n offset = pagination.page_index * pagination.items_per_page\n states_query = states_query.offset(offset).limit(pagination.items_per_page)\n\n states = []\n for state_record in states_query:\n state = ScenarioViewState(\n scenario=self,\n name=state_record.name,\n description=state_record.description,\n date=state_record.changed\n )\n states.append(state)\n\n return states\n\n def rename(self, name):\n \"\"\"Rename the scenario.\n\n :param name: new name for the scenario.\n :raises: AssertionError if new name is the same as current one.\n \"\"\"\n current_state = self.get_view_states(pagination=Pagination(0, 1))[0]\n assert current_state.name != name, \"New name is the same as current one\"\n state_record = ScenarioViewStateRecord(\n scenario_id=self.__id,\n name=name,\n description=current_state.description,\n changed=datetime.utcnow()\n )\n\n with self.base.get_session() as 
session:\n session.add(state_record)\n session.commit()\n\n def change_description(self, description):\n \"\"\"Change description of the scenario.\n\n :param description: new name for the scenario.\n :raises: AssertionError if new description is the same as current one.\n \"\"\"\n current_state = self.get_view_states(pagination=Pagination(0, 1))[0]\n assert current_state.description != description, (\n \"New description is the same as current one\"\n )\n state_record = ScenarioViewStateRecord(\n scenario_id=self.__id,\n name=current_state.name,\n description=description,\n changed=datetime.utcnow()\n )\n\n with self.base.get_session() as session:\n session.add(state_record)\n session.commit()\n\n def get_structure(self, structure_filter=None):\n \"\"\"Get structure of scenario by filter.\n\n :param structure_filter: instance of :class:`ScenarioStructureFilter\n `.\n :returns: dictionary with tree_path as keys and node_ids as values.\n \"\"\"\n subquery = Query([\n ScenarioStructureStateRecord.scenario_id,\n ScenarioStructureStateRecord.tree_path,\n func.max(ScenarioStructureStateRecord.changed).label('newest_change_date')\n ]).filter_by(\n scenario_id=self.__id\n )\n\n if structure_filter:\n if structure_filter.date:\n subquery = subquery.filter(\n ScenarioStructureStateRecord.changed <= structure_filter.date)\n\n if structure_filter.tree_path:\n subquery = subquery.filter(ScenarioStructureStateRecord.tree_path.like(\n \"{0}-%\".format(structure_filter.tree_path)\n ))\n\n subquery = subquery.group_by(\n ScenarioStructureStateRecord.scenario_id,\n ScenarioStructureStateRecord.tree_path\n ).subquery()\n\n states_query = Query([\n ScenarioStructureStateRecord\n ]).join(\n subquery,\n and_(\n ScenarioStructureStateRecord.scenario_id == subquery.columns.scenario_id,\n ScenarioStructureStateRecord.tree_path == subquery.columns.tree_path,\n ScenarioStructureStateRecord.changed == subquery.columns.newest_change_date\n )\n ).filter(\n ScenarioStructureStateRecord.enabled == True # pylint: disable=singleton-comparison\n )\n\n with self.base.get_session() as session:\n bound_query = states_query.with_session(session)\n return {record.tree_path: record.node_id for record in bound_query}\n\n def _lock_structure(self):\n \"\"\"Lock the structure to prevent changes.\n\n :raises: :class:`ScenarioStructureLockedError` exception if scenario is already locked.\n \"\"\"\n scenario_id = self.__id\n lock_record = ScenarioStructureLockRecord(scenario_id=self.__id)\n try:\n with self.base.get_session() as session:\n session.add(lock_record)\n session.commit()\n except IntegrityError:\n raise ScenarioStructureLockedError(\n \"Structure of scenario with id '{0}' has been already locked\".format(scenario_id)\n )\n\n def _unlock_structure(self):\n \"\"\"Unlock the structure so changes could be made.\"\"\"\n with self.base.get_session() as session:\n session.query(ScenarioStructureLockRecord).filter_by(scenario_id=self.__id).delete()\n session.commit()\n\n def attach_structure(self, attachment_nodes):\n \"\"\"Attach nodes structured as tree to the scenario structure.\n\n :param attachment_nodes: dictionary with tree path as keys and list of instances of\n :class:`_AttachmentNode `\n as values.\n \"\"\"\n scenario_id = self.__id\n changed_date = datetime.utcnow()\n\n def _create_node_records(child_path_pattern, tree_paths, attachment_nodes):\n \"\"\"Helpful method to create records for new nodes.\n\n :param child_path_pattern: pattern to create tree_path for node. 
Must contain {0}.\n :param tree_paths: set of existent tree_paths.\n :attachment_nodes: list of instances of\n :class:`_AttachmentNode `\n\n :returns: list of :class:`ScenarioStructureStateRecord\n `.\n \"\"\"\n node_records = []\n index = -1\n for attachment_node in attachment_nodes:\n index += 1\n tree_path = child_path_pattern.format(index)\n while tree_path in tree_paths:\n index += 1\n tree_path = child_path_pattern.format(index)\n\n node_record = ScenarioStructureStateRecord(\n node_id=attachment_node.get_node_id(),\n scenario_id=scenario_id,\n tree_path=tree_path,\n changed=changed_date,\n )\n node_records.append(node_record)\n\n children_node_records = _create_node_records(\n child_path_pattern=tree_path + '-{0}',\n tree_paths=tree_paths,\n attachment_nodes=attachment_node.get_children()\n )\n node_records.extend(children_node_records)\n\n return node_records\n\n self._lock_structure()\n try:\n records_to_create = []\n\n current_tree_paths = set(self.get_structure().keys())\n for parent_tree_path, attachment_trees in attachment_nodes.items():\n if parent_tree_path is None:\n path_pattern = \"{0}\"\n elif parent_tree_path not in current_tree_paths:\n continue\n else:\n path_pattern = parent_tree_path + \"-{0}\"\n\n records_to_create.extend(_create_node_records(\n path_pattern, current_tree_paths, attachment_trees\n ))\n\n with self.base.get_session() as session:\n session.add_all(records_to_create)\n session.commit()\n finally:\n self._unlock_structure()\n\n def detach_structure(self, tree_paths):\n \"\"\"Detach nodes from the scenario structure.\n\n :param tree_paths: list of tree paths to detach.\n \"\"\"\n scenario_id = self.__id\n changed_date = datetime.utcnow()\n\n self._lock_structure()\n try:\n current_structure = self.get_structure()\n records_to_create = []\n for tree_path in tree_paths:\n if tree_path not in current_structure:\n continue\n\n state_record = ScenarioStructureStateRecord(\n node_id=current_structure[tree_path],\n scenario_id=scenario_id,\n tree_path=tree_path,\n changed=changed_date,\n enabled=False\n )\n\n records_to_create.append(state_record)\n current_structure.pop(tree_path)\n\n child_path_prefix = tree_path + '-'\n current_paths = tuple(current_structure.keys())\n for path in current_paths:\n if path.startswith(child_path_prefix):\n state_record = ScenarioStructureStateRecord(\n node_id=current_structure[path],\n scenario_id=scenario_id,\n tree_path=path,\n changed=changed_date,\n enabled=False\n )\n records_to_create.append(state_record)\n current_structure.pop(path)\n\n with self.base.get_session() as session:\n session.add_all(records_to_create)\n session.commit()\n finally:\n self._unlock_structure()\n\n\nScenarioViewState = namedtuple(\"ScenarioViewState\", \"scenario name description date\")\n", "sub_path": "src/autostorage/core/scenario/scenario.py", "file_name": "scenario.py", "file_ext": "py", "file_size_in_byte": 11360, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "autostorage.core._base._BaseClient", "line_number": 28, "usage_type": "name"}, {"api_name": "autostorage.database.model.scenario.scenario.Scenario", "line_number": 49, "usage_type": "argument"}, {"api_name": "autostorage.core.param_spec.Pagination", "line_number": 73, "usage_type": "call"}, {"api_name": "autostorage.database.model.scenario.view_state.ScenarioViewState", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 79, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 79, "usage_type": "name"}, {"api_name": "autostorage.core.param_spec.Pagination", "line_number": 92, "usage_type": "call"}, {"api_name": "autostorage.database.model.scenario.view_state.ScenarioViewState", "line_number": 96, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 100, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Query", "line_number": 114, "usage_type": "call"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState.scenario_id", "line_number": 115, "usage_type": "attribute"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState", "line_number": 115, "usage_type": "name"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState.tree_path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState", "line_number": 116, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.expression.func.max", "line_number": 117, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.expression.func", "line_number": 117, "usage_type": "name"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState.changed", "line_number": 117, "usage_type": "attribute"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState", "line_number": 117, "usage_type": "name"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState.changed", "line_number": 125, "usage_type": "attribute"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState", "line_number": 125, "usage_type": "name"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState.tree_path.like", "line_number": 128, "usage_type": "call"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState.tree_path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState", "line_number": 128, "usage_type": "name"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState.scenario_id", "line_number": 133, "usage_type": "attribute"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState", "line_number": 133, "usage_type": "name"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState.tree_path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState", "line_number": 134, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Query", "line_number": 137, "usage_type": "call"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState", "line_number": 138, "usage_type": "name"}, {"api_name": "sqlalchemy.and_", "line_number": 141, "usage_type": "call"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState.scenario_id", "line_number": 142, "usage_type": "attribute"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState", "line_number": 142, "usage_type": "name"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState.tree_path", "line_number": 143, "usage_type": 
"attribute"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState", "line_number": 143, "usage_type": "name"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState.changed", "line_number": 144, "usage_type": "attribute"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState", "line_number": 144, "usage_type": "name"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState.enabled", "line_number": 147, "usage_type": "attribute"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState", "line_number": 147, "usage_type": "name"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureLock", "line_number": 160, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.IntegrityError", "line_number": 165, "usage_type": "name"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureLock", "line_number": 173, "usage_type": "argument"}, {"api_name": "datetime.datetime.utcnow", "line_number": 184, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 184, "usage_type": "name"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState", "line_number": 206, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 252, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 252, "usage_type": "name"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState", "line_number": 262, "usage_type": "call"}, {"api_name": "autostorage.database.model.scenario.structure_state.ScenarioStructureState", "line_number": 277, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 294, "usage_type": "call"}]} {"seq_id": "323714304", "text": "# Written by Niveditha Kalavakonda (nkalavak@uw.edu)\n# Fall 2017 - Dataset Generation\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport imutils\nimport xml.etree.ElementTree as ET\n#from shapely.geometry import Polygon, Point\nimport matplotlib.path as mplPath\n\n\n# Look-Up Table for Different Segmentation Parts:\n# -------------------------------------------------\n# 0 Instrument \n# 255 Tissue\n\n\n# Look-Up Table for Different Instrument Types:\n# -------------------------------------------------\n# 1 Suction\n# 2 Electrocautery\n# 3 Grasper\n# 4 Cutter\n# 5 Bipolar\n\nimage_path = 'D:/01 Projects/AMAZON CATALYST PROJECT/ColorStats_ToLoad'\n\nsave_path = 'D:/01 Projects/AMAZON CATALYST PROJECT/Dataset'\n\n#XML Parsing using ElementTree\nxml_path = 'D:/01 Projects/AMAZON CATALYST PROJECT/xml_100instruments'\n\nr_start = 0\nc_start = 0\nn_rows = 480 #475\nn_cols = 720 #520\nn_images = 20 #100\n\nbackground = [0, 0, 0]\ninstr = [255, 255, 255]\nstart_image_index = 70 #300\n\n\nfor img in range(start_image_index, start_image_index + n_images):\n#img = 14\n index = img #'%03d' % img\n orig_image = cv2.imread(image_path + '/frame' + str(index) + '.jpg')\n\n #To rotate and flip image\n\n #new_image1 = cv2.flip(orig_image)\n ##final_image = imutils.rotate_bound(new_image1,90)\n\n \"\"\"rows,cols,_ = orig_image.shape\n new_image1 = cv2.flip(orig_image,0)\n M = cv2.getRotationMatrix2D((cols/2,rows/2),-90,1)\n final_image = cv2.warpAffine(new_image1, M, (cols,rows))\"\"\"\n #cv2.imshow(\"Flipped Image\", final_image)\n #print (orig_image.shape)\n #print (final_image.shape)\n\n new_image = 
np.zeros(orig_image.shape)\n\n #n_rows,n_cols = n_cols,n_rows\n\n tree = ET.parse(xml_path + '/frame' + str(index) + '.xml')\n root = tree.getroot()\n \n x = []\n y = []\n poly1_s = None\n poly1_a = None\n poly2_l = None\n poly3_l = None\n poly4_l = None\n poly5_l = None\n poly6 = None\n poly2_r = None\n poly3_r = None\n poly4_r = None\n poly5_r = None\n poly7 = None\n poly8 = None\n for neighbor in root.iter('object'): \n for charac in neighbor.findall('polygon'):\n points = [] \n # reset the vertex buffers for each polygon so every Path\n # contains only the points of its own annotation\n x = []\n y = []\n for verts in charac.iter('pt'):\n y.append(int(verts.find('x').text))\n x.append(int(verts.find('y').text)) \n #print (x)\n #print (y)\n\n points = zip(x,y)\n if(neighbor.find('name').text == 'suction' and (neighbor.find('attributes').text == 'surgeon suction' or neighbor.find('attributes').text == None)):\n poly1_s = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'suction' and neighbor.find('attributes').text == 'assistant suction'):\n poly1_a = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'electrocautery' and (neighbor.find('attributes').text == 'left cautery' or neighbor.find('attributes').text == None)):\n poly2_l = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'electrocautery' and neighbor.find('attributes').text == 'right cautery'):\n poly2_r = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'grasper' and (neighbor.find('attributes').text == 'left grasper' or neighbor.find('attributes').text == None)):\n poly3_l = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'grasper' and neighbor.find('attributes').text == 'right grasper'):\n poly3_r = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'cutter' and (neighbor.find('attributes').text == 'left cutter' or neighbor.find('attributes').text == None)):\n poly4_l = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'cutter' and neighbor.find('attributes').text == 'right cutter'):\n poly4_r = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'pickup' and (neighbor.find('attributes').text == 'left pickup' or neighbor.find('attributes').text == None)):\n poly5_l = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'pickup' and neighbor.find('attributes').text == 'right pickup'):\n poly5_r = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'curette'):\n poly6 = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'drill'):\n poly7 = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'others'):\n poly8 = mplPath.Path(list(points))\n \"\"\"if(neighbor.find('name').text == 'surgeon suction' or neighbor.find('name').text == 'suction'):\n poly1_s = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'assistant suction'):\n poly1_a = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'left cautery' or neighbor.find('name').text == 'electrocautery'):\n poly2_l = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'right cautery'):\n poly2_r = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'left grasper' or (neighbor.find('name').text == 'grasper')):\n poly3_l = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'right grasper'):\n poly3_r = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'cutter' or (neighbor.find('name').text == 'left cutter')):\n poly4_l = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'right cutter'):\n poly4_r = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'pickup' or (neighbor.find('name').text == 
'left pickup')):\n poly5_l = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'right pickup'):\n poly5_r = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'curette'):\n poly6 = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'drill'):\n poly7 = mplPath.Path(list(points))\n if(neighbor.find('name').text == 'others'):\n poly8 = mplPath.Path(list(points))\"\"\"\n\n x[:] = []\n y[:] = []\n for c in range(c_start + n_cols):\n for r in range(r_start + n_rows):\n\n #Check if point is present inside suction mask\n if((poly1_a != None and poly1_a.contains_point((r,c)) == True) or (poly1_s != None and poly1_s.contains_point((r,c)) == True)):\n new_image[r,c] = instr#orig_image[r,c]\n\n elif((poly2_l != None and poly2_l.contains_point((r,c)) == True) or (poly2_r != None and poly2_r.contains_point((r,c)) == True)):\n new_image[r,c] = instr#orig_image[r,c]#instr#\n\n elif((poly3_l != None and poly3_l.contains_point((r,c)) == True) or (poly3_r != None and poly3_r.contains_point((r,c)) == True)):\n new_image[r,c] = instr#orig_image[r,c]#instr\n\n elif((poly4_l != None and poly4_l.contains_point((r,c)) == True) or (poly4_r != None and poly4_r.contains_point((r,c)) == True)):\n new_image[r,c] = instr#orig_image[r,c]#instr\n\n elif((poly5_l != None and poly5_l.contains_point((r,c)) == True) or (poly5_r != None and poly5_r.contains_point((r,c)) == True)):\n new_image[r,c] = instr#orig_image[r,c]#instr\n\n elif(poly6 != None and poly6.contains_point((r,c)) == True):\n new_image[r,c] = instr#orig_image[r,c]#instr\n\n elif(poly7 != None and poly7.contains_point((r,c)) == True):\n new_image[r,c] = instr#orig_image[r,c]#instr\n\n elif(poly8 != None and poly8.contains_point((r,c)) == True):\n new_image[r,c] = instr#orig_image[r,c]#instr\n\n else:\n new_image[r,c] = background\n\n cv2.imwrite(save_path+'/frame'+str(index)+'.jpg',new_image)\n print (\"frame \"+ str(index) +\" : done!\")\n", "sub_path": "dataGeneration/groundTruth_generation.py", "file_name": "groundTruth_generation.py", "file_ext": "py", "file_size_in_byte": 8189, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "cv2.imread", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 63, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 67, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.path.Path", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.path", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.path.Path", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.path", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.path.Path", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.path", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.path.Path", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.path", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.path.Path", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.path", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.path.Path", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.path", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.path.Path", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.path", "line_number": 108, 
"usage_type": "name"}, {"api_name": "matplotlib.path.Path", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.path", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.path.Path", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.path", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.path.Path", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.path", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.path.Path", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.path", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.path.Path", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.path", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.path.Path", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.path", "line_number": 120, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 181, "usage_type": "call"}]} {"seq_id": "198740014", "text": "\"\"\"This page is for more details\"\"\"\nimport logging\nfrom typing import List\n\nimport streamlit as st\n\nimport awesome_streamlit as ast\nimport pandas as pd\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport numpy as np\n\n# Get an instance of a logger\nlogging.basicConfig(format=\"%(asctime)s - %(message)s\", level=logging.INFO)\ndef write():\n \"\"\"Writes content to the app\"\"\"\n #ast.shared.components.title_awesome(\"Detail\") # Titel Awesome_Streamlit\n \n # Page title\n st.title(\"Detailed view\")\n \n #-----------------Markdown info CSS-----------------\n st.markdown('''\n \n \n ''', unsafe_allow_html=True)\n\n st.markdown('''\n
ⓘ\n \n Pie Chart
\n The pie chart shows the age distribution worldwide for the selected year. \n

\n Sankey Diagram
\n The Sankey diagram shows the top 10 origin (left) and target (right) countries \n as well as the distribution of asylum seekers per year.\n

\n Line Chart
\n The line chart shows the development of total annual asylum applications over the years.\n
\n
\n ''', unsafe_allow_html=True) \n\n \n # read csv for sankey diagram\n show_df = pd.read_csv('https://raw.githubusercontent.com/hannahkruck/visuasyl/master/src/datasets/Sankey_Diagramm.csv',sep = ';')\n\n #-----------------Slider-------------------\n # Create Slider and get from above read csv min and max years \n year = st.slider(\"\", (int(show_df[\"Year\"].min())),(int(show_df[\"Year\"].max())))\n # Variable year for both diagrams\n selected_year = year\n\n\n #-----------------Page Layout--------------\n # Layout setting of the page \n c1, c2 = st.beta_columns((1, 1))\n container = st.beta_container()\n st.write('', unsafe_allow_html=True)\n\n\n #------------------Create Sankey diagram-------------------------------\n # https://www.geeksforgeeks.org/sankey-diagram-using-plotly-in-python/\n # https://coderzcolumn.com/tutorials/data-science/how-to-plot-sankey-diagram-in-python-jupyter-notebook-holoviews-and-plotly#2\n \n # Variable year for Sankey diagram\n yearVar = selected_year \n \n # year\n yearRows = show_df[show_df['Year'] != yearVar].index\n show_df.drop(yearRows , inplace=True)\n\n # Nodes, links, colors\n label_souce = show_df['Label_Source'].dropna(axis=0, how='any')\n label_souce2 = []\n elementVar = ''\n\n # \n for i in label_souce: \n if(i != elementVar) : \n label_souce2.append(i)\n elementVar = i\n\n # \n label_target = show_df['Label_Target'].dropna(axis=0, how='any')\n label = [*label_souce2, *label_target]\n source = show_df['Source'].dropna(axis=0, how='any')\n target = show_df['Target'].dropna(axis=0, how='any')\n value = show_df['Value'].dropna(axis=0, how='any')\n\n # setting color for node and link\n color_node = [\n # Source color order Syria, Afghanistan, Venezuela, Irak, Colombia, Pakistan, Tรผrkei, Nigeria, Iran, Albania\n '#40bf77', '#93beec', '#1ff91f', '#cd8162', '#a6a6a6', '#80e5ff', '#b299e6', '#ff33ff', '#CDC037', '#ff6a6a',\n # Target color order\n '#0B2641', '#0B2641', '#0B2641', '#0B2641', '#0B2641', '#0B2641', '#0B2641', '#0B2641', '#0B2641', '#0B2641']\n\n color_link = [\n '#b8e0b8', '#b8e0b8', '#b8e0b8', '#b8e0b8', '#b8e0b8', '#b8e0b8', '#b8e0b8', '#b8e0b8', '#b8e0b8', '#b8e0b8', \n '#bed8f4', '#bed8f4', '#bed8f4', '#bed8f4', '#bed8f4', '#bed8f4', '#bed8f4', '#bed8f4', '#bed8f4', '#bed8f4', \n '#bef4be', '#bef4be', '#bef4be', '#bef4be', '#bef4be', '#bef4be', '#bef4be', '#bef4be', '#bef4be', '#bef4be',\n '#e7c1b1', '#e7c1b1', '#e7c1b1', '#e7c1b1', '#e7c1b1', '#e7c1b1', '#e7c1b1', '#e7c1b1', '#e7c1b1', '#e7c1b1',\n '#cccccc', '#cccccc', '#cccccc', '#cccccc', '#cccccc', '#cccccc', '#cccccc', '#cccccc', '#cccccc', '#cccccc', \n '#80e5ff', '#80e5ff', '#80e5ff', '#80e5ff', '#80e5ff', '#80e5ff', '#80e5ff', '#80e5ff', '#80e5ff', '#80e5ff', \n '#c2adeb', '#c2adeb', '#c2adeb', '#c2adeb', '#c2adeb', '#c2adeb', '#c2adeb', '#c2adeb', '#c2adeb', '#c2adeb',\n '#ffccff', '#ffccff', '#ffccff', '#ffccff', '#ffccff', '#ffccff', '#ffccff', '#ffccff', '#ffccff', '#ffccff', \n '#ffec80', '#ffec80', '#ffec80', '#ffec80', '#ffec80', '#ffec80', '#ffec80', '#ffec80', '#ffec80', '#ffec80', \n '#ffcccc', '#ffcccc', '#ffcccc', '#ffcccc', '#ffcccc', '#ffcccc', '#ffcccc', '#ffcccc', '#ffcccc', '#ffcccc',] \n\n # data to dict, dict to sankey\n link = dict(source = source, target = target, value = value, color = color_link)\n node = dict(label = label, pad = 20, thickness = 10, color = color_node)\n layout = dict(\n height = 800, \n font = dict(\n size = 11),\n margin=dict(\n l=0,\n r=0,\n b=0,\n t=30,\n ),\n )\n \n data = go.Sankey(link = link, node=node)\n \n # properties sankey 
diagram layout\n    fig2 = go.Figure(data, layout= layout)\n\n    #-----------------Create pie chart-------------------\n    # https://jasonxqh.github.io/2020/07/12/plotlyๅŸบ็ก€/\n    \n    # read csv for pie chart\n    df = pd.read_csv('https://raw.githubusercontent.com/hannahkruck/visuasyl/master/src/datasets/Piechart.csv',sep = ';')\n\t\n    # show the values of the year chosen on the slider; the CSV has one column per year (2010-2019)\n    labels = df['year'].tolist()\n    values = df[str(selected_year)].tolist()\n\n    # define color sets \n    colors = ['#e6f2ff', '#b3d9ff', '#80bfff', '#3386E6']\n\n    # create pie figure\n    fig1 = go.Figure(data=[go.Pie(\n        labels = labels, \n        values = values, \n        insidetextorientation = 'radial',\n        hole = 0.399,)])\n\n    # update hover and text information\n    fig1.update_traces(\n        hoverinfo = 'label+percent+value', \n        textinfo = 'percent+label',\n        textfont_size = 11,\n        marker = dict(colors = colors, \n        line = dict(color = 'lightskyblue', width = 0.1))) \n\t\n    # update layout settings\n    fig1.update_layout(dict(\n        height = 400,\n        font = dict(\n            size = 12)))\n\n    # add annotations in the center of the donut pie\n    fig1.update_layout(\n        annotations=[dict(\n            text='Age
Distribution
', \n font_size=12, \n showarrow=False),],\n margin=dict(\n l=0,\n r=0,\n b=0,\n t=50,\n ),\n )\n#------------Create Timeline Years V. 2.0-------------------\n # read CSV for the histogram graph\n df = pd.read_csv(\"https://raw.githubusercontent.com/hannahkruck/visuasyl/master/src/datasets/Histogramm.csv\",encoding =\"utf8\", sep = \";\")\n \n # use years for the x-axis and the worldwide amount of asylum applications for the y-axis\n fig3 = go.Figure(go.Scatter(x = df['year'], y = df['asylum_applications_worldwide']))\n \n # customizing the graph\n fig3.update_layout(\n # customize height\n height=100,\n # hide labels\n yaxis={'visible': False, 'showticklabels': False\n },\n # show every year as a label below\n xaxis={'type': 'category'},\n # create white background to match with initial background of streamlit\n plot_bgcolor='rgb(255,255,255)',\n\n # set all margins and padding to zero to create full width graph\n margin=go.layout.Margin(\n l=0,\n r=0,\n b=0,\n t=0,\n pad = 0\n )\n)\n\n with c1:\n st.subheader('Asylum seekers by age in Europe in the year %s' % selected_year) \n st.plotly_chart(fig1, use_container_width=True,config={'modeBarButtonsToRemove': ['lasso2d','select2d', 'pan2d', 'hoverClosestPie']})\n with c2:\n st.subheader('Top 10 Distribution of a Countries Asylum Applications among the various Countries of Destination %s' % selected_year)\n st.plotly_chart(fig2, use_container_width=True, config={'modeBarButtonsToRemove': ['lasso2d','select2d', 'pan2d', 'hoverClosestCartesian', 'hoverCompareCartesian']})\n with container:\n st.plotly_chart(fig3, use_container_width=True, config=dict(displayModeBar=False))\n \nif __name__ == \"__main__\":\n write()\n", "sub_path": "src/pages/detail.py", "file_name": "detail.py", "file_ext": "py", "file_size_in_byte": 10022, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.basicConfig", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 14, "usage_type": "attribute"}, {"api_name": "streamlit.title", "line_number": 20, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 23, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 75, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 79, "usage_type": "call"}, {"api_name": "streamlit.beta_columns", "line_number": 86, "usage_type": "call"}, {"api_name": "streamlit.beta_container", "line_number": 87, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 88, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Sankey", "line_number": 154, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 154, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 157, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 157, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 163, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 192, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 192, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Pie", "line_number": 192, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 227, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 230, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 
230, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 230, "usage_type": "call"}, {"api_name": "plotly.graph_objects.layout.Margin", "line_number": 245, "usage_type": "call"}, {"api_name": "plotly.graph_objects.layout", "line_number": 245, "usage_type": "attribute"}, {"api_name": "plotly.graph_objects", "line_number": 245, "usage_type": "name"}, {"api_name": "streamlit.subheader", "line_number": 255, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 256, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 258, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 259, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 261, "usage_type": "call"}]} {"seq_id": "334900873", "text": "# Assign analytes to the right workflow for library QC, based on which prep was\n# used.\nimport sys\nimport re\nfrom collections import defaultdict\nfrom genologics.lims import *\nfrom genologics import config\n\nDEFAULT_WORKFLOW = \"NSC QC\"\nWORKFLOW_MAPPING = {\n 'NSC_16S': 'NSC 16S'\n }\n\n\ndef main(process_id):\n lims = Lims(config.BASEURI, config.USERNAME, config.PASSWORD)\n process = Process(lims, id=process_id)\n analytes = process.all_inputs(unique=True, resolve=True)\n\n workflow_analytes = defaultdict(list)\n # Identify workflow for each of the samples\n for analyte in analytes:\n sample_prep_used = analyte.samples[0].udf.get('Sample prep NSC')\n workflow = WORKFLOW_MAPPING.get(sample_prep_used, DEFAULT_WORKFLOW)\n workflow_analytes[workflow].append(analyte)\n\n # Load all workflows in the system\n workflow_data = lims.get_workflows(add_info=True)\n\n # Look up each workflow name in the list of workflows, and assign it if available\n for workflow_prefix, analytes in workflow_analytes.items():\n for workflow, info in zip(*workflow_data):\n if info['status'] == \"ACTIVE\" and info['name'].startswith(workflow_prefix):\n lims.route_analytes(analytes, workflow)\n break\n else:\n print ((\"Error: Unknown workflow '{}' for samples \".format(workflow_prefix) +\n \", \".join(analyte.name for analyte in analytes) +\n \".\"))\n sys.exit(1)\n\nif __name__ == \"__main__\":\n main(*sys.argv[1:])\n\n", "sub_path": "sample-prep/nsc-post-prep-workflow-routing.py", "file_name": "nsc-post-prep-workflow-routing.py", "file_ext": "py", "file_size_in_byte": 1548, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "genologics.config.BASEURI", "line_number": 16, "usage_type": "attribute"}, {"api_name": "genologics.config", "line_number": 16, "usage_type": "name"}, {"api_name": "genologics.config.USERNAME", "line_number": 16, "usage_type": "attribute"}, {"api_name": "genologics.config.PASSWORD", "line_number": 16, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 43, "usage_type": "attribute"}]} {"seq_id": "39705398", "text": "#!/usr/bin/python3\n# coding: utf-8\n## ๆ•ฐๆฎ้›† MovieLens\n# MovieLens ๆ•ฐๆฎ้›†ๅŒ…ๅซๅคšไธช็”จๆˆทๅฏนๅคš้ƒจ็”ตๅฝฑ็š„่ฏ„็บงๆ•ฐๆฎ, ไนŸๅŒ…ๆ‹ฌ็”ตๅฝฑๅ…ƒๆ•ฐๆฎไฟกๆฏๅ’Œ็”จๆˆทๅฑžๆ€งไฟกๆฏ; ่ฟ™ไธชๆ•ฐๆฎ้›†็ปๅธธ็”จๆฅๅšๆŽจ่็ณป็ปŸ, ๆœบๅ™จๅญฆไน ็ฎ—ๆณ•็š„ๆต‹่ฏ•ๆ•ฐๆฎ้›†\n# ๆœฌๆ–‡ๆ‰€็”จ็š„ๆ•ฐๆฎไธบ ml-latest-small.zip; ๆœฌๆ–‡ไธป่ฆไฝฟ็”จๅ…ถไธญ ratings.csvใ€movies.csv ไธคไธชๆ•ฐๆฎ่กจ:\n\n## ratings ๆ•ฐๆฎ\n# userId: ๆฏไธช็”จๆˆท็š„ id\n# movieId: 
ๆฏ้ƒจ็”ตๅฝฑ็š„ id\n# rating: ็”จๆˆท่ฏ„ๅˆ†, ๆ˜ฏ 5 ๆ˜Ÿๅˆถ, ๆŒ‰ๅŠ้ข—ๆ˜Ÿ็š„่ง„ๆจก้€’ๅขž(0.5 stars 5 stars)\n# timestamp: ่‡ช 1970 ๅนด 1 ๆœˆ 1 ๆ—ฅ้›ถ็‚นๅŽๅˆฐ็”จๆˆทๆไบค่ฏ„ไปท็š„ๆ—ถ้—ด็š„็ง’ๆ•ฐ\n# ๆ•ฐๆฎๆŽ’ๅบ็š„้กบๅบๆŒ‰็…ง userId, movieId ๆŽ’ๅˆ—็š„\n# userId,movieId,rating,timestamp\n# 1,1,4.0,964982703\n# 1,3,4.0,964981247\n# 1,6,4.0,964982224\n# 1,47,5.0,964983815\n\n## movies ๆ•ฐๆฎ\n# movieId: ๆฏ้ƒจ็”ตๅฝฑ็š„ id\n# title: ็”ตๅฝฑ็š„ๆ ‡้ข˜\n# genres: ็”ตๅฝฑ็š„็ฑปๅˆซ\n# movieId,title,genres\n# 1,Toy Story (1995),Adventure|Animation|Children|Comedy|Fantasy\n# 2,Jumanji (1995),Adventure|Children|Fantasy\n# 3,Grumpier Old Men (1995),Comedy|Romance\n# 4,Waiting to Exhale (1995),Comedy|Drama|Romance\n\nimport math\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nnp.random.seed(1024)\n\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\n\n##################################################################\n## ไธ€: ๆ•ฐๆฎๅค„็†\nmoviesPath = '/Users/coder352/datasets/Struct_Data/MovieLens/ml-latest-small/movies.csv'\nratingsPath = '/Users/coder352/datasets/Struct_Data/MovieLens/ml-latest-small/ratings.csv'\nmoviesDF = pd.read_csv(moviesPath, index_col=None)\nratingsDF = pd.read_csv(ratingsPath, index_col=None)\n\n## ่ฟ™้‡Œๆˆ‘ไปฌๆŒ‰็…ง 4:1 ็š„ๆฏ”ไพ‹ๅฐ†ๆ•ฐๆฎ้›†่ฟ›่กŒๆ‹†ๅˆ†, ๅŒๆ—ถๆ‰“ๅฐๅ‡บๆ€ป็š„็”จๆˆทๅ’Œ็”ตๅฝฑๆ•ฐ้‡ใ€่ฎญ็ปƒ้›†ไธญ็š„็”จๆˆทๅ’Œ็”ตๅฝฑๆ•ฐ้‡ไปฅๅŠๆต‹่ฏ•้›†ไธญ็š„็”จๆˆทๅ’Œ็”ตๅฝฑๆ•ฐ้‡:\ntrainRatingsDF, testRatingsDF = train_test_split(ratingsDF, test_size=0.2)\nprint(\"total_movie_count: \" + str(len(set(ratingsDF['movieId'].values.tolist())))) # total_movie_count: 9724\nprint(\"total_user_count: \" + str(len(set(ratingsDF['userId'].values.tolist())))) # total_user_count: 610\nprint(\"train_movie_count: \" + str(len(set(trainRatingsDF['movieId'].values.tolist())))) # train_movie_count: 9005\nprint(\"test_movie_count: \" + str(len(set(testRatingsDF['movieId'].values.tolist())))) # test_movie_count: 5128\nprint(\"train_user_count: \" + str(len(set(trainRatingsDF['userId'].values.tolist())))) # train_user_count: 610\nprint(\"test_user_count: \" + str(len(set(testRatingsDF['userId'].values.tolist())))) # test_user_count: 610\n\n## ไธ‹้ข, ไฝฟ็”จ pivot_table ๅพ—ๅˆฐ็”จๆˆท-็”ตๅฝฑ็š„่ฏ„ๅˆ†็Ÿฉ้˜ต, ๆœฌๆ–‡ไธญๅพ—ๅˆฐ 610*8981 ็š„่ฏ„ๅˆ†็Ÿฉ้˜ต\ntrainRatingsPivotDF = pd.pivot_table(trainRatingsDF[['userId', 'movieId', 'rating']], columns=['movieId'], index=['userId'], values='rating', fill_value=0)\n\n## ๅพ—ๅˆฐ็”ตๅฝฑ idใ€็”จๆˆท id ไธŽๅ…ถ็ดขๅผ•็š„ๆ˜ ๅฐ„ๅ…ณ็ณป:\n# enumerate ่ฟ”ๅ›ž็ฉทไธพๅบๅˆ—ๅทไธŽๅ€ผ\nmoviesMap = dict(enumerate(list(trainRatingsPivotDF.columns))) # 9005 ้ƒจ็”ตๅฝฑ\nusersMap = dict(enumerate(list(trainRatingsPivotDF.index))) # 610 ไธช็”จๆˆท\nratingValues = trainRatingsPivotDF.values.tolist() # ็Ÿฉ้˜ตๅ˜ๆˆ list ๆฏไธ€่กŒๅ˜ๆˆ list ็š„ไธ€ไธชๅ€ผ; ้•ฟๅบฆไธบ 610 ๆฏไธชๅ€ผๅคงๅฐไธบ 9005; ๆ„ๆ€ๅฐฑๆ˜ฏ 610 ไธช็”จๆˆท, ๆฏไธช็”จๆˆทไธ€ไธช 9005 ็š„็‰นๅพๅ‘้‡\nprint(len(ratingValues)) # 610\nprint(len(ratingValues[0]), len(ratingValues[0])) # 9005 9005\n\n##################################################################\n## ไบŒ: ็›ธไผผๅบฆ่ฎก็ฎ—\n## ็”จๆˆท็›ธไผผๅบฆ่ฎก็ฎ—\n# ๅˆฉ็”จไฝ™ๅผฆ็›ธไผผๅบฆ่ฎก็ฎ—็”จๆˆทไน‹้—ด็š„็›ธไผผๅบฆ\ndef calCosineSimilarity(list1, list2):\n res = 0\n denominator1 = 0\n denominator2 = 0\n for (val1, val2) in zip(list1, list2):\n res += (val1 * val2)\n denominator1 += val1 ** 2\n denominator2 += val2 ** 2\n return res / (math.sqrt(denominator1 * 
denominator2))\n\n## ๆ นๆฎไธŠ้ข็š„็›ธไผผๅบฆๅ‡ฝๆ•ฐ, ่ฎก็ฎ—็”จๆˆทไน‹้—ด็š„็›ธไผผๅบฆ็Ÿฉ้˜ต(610*610)\n# ๆ นๆฎ็”จๆˆทๅฏน็”ตๅฝฑ็š„่ฏ„ๅˆ†, ๆฅๅˆคๆ–ญๆฏไธช็”จๆˆท้—ด็›ธไผผๅบฆ\nuserSimMatrix = np.zeros((len(ratingValues), len(ratingValues)), dtype=np.float32)\nfor i in range(len(ratingValues) - 1): # ่ฟ™ไธชๅพช็Žฏๅคง็บฆ 3min\n for j in range(i + 1, len(ratingValues)):\n userSimMatrix[i, j] = calCosineSimilarity(ratingValues[i], ratingValues[j])\n userSimMatrix[j, i] = userSimMatrix[i, j]\n\n## ๆŽฅไธ‹ๆฅ, ๆˆ‘ไปฌ่ฆๆ‰พๅˆฐไธŽๆฏไธช็”จๆˆทๆœ€็›ธ่ฟ‘็š„ K ไธช็”จๆˆท, ็”จ่ฟ™ K ไธช็”จๆˆท็š„ๅ–œๅฅฝๆฅๅฏน็›ฎๆ ‡็”จๆˆท่ฟ›่กŒ็‰ฉๅ“ๆŽจ่, ่ฟ™้‡Œ K=10, ไธ‹้ข็š„ไปฃ็ ็”จๆฅ่ฎก็ฎ—ไธŽๆฏไธช็”จๆˆทๆœ€็›ธ่ฟ‘็š„ 10 ไธช็”จๆˆท:\n# ๆ‰พๅˆฐไธŽๆฏไธช็”จๆˆทๆœ€็›ธ่ฟ‘็š„ๅ‰ K ไธช็”จๆˆท\nuserMostSimDict = dict()\nfor i in range(len(ratingValues)):\n userMostSimDict[i] = sorted(enumerate(list(userSimMatrix[i])), key=lambda x: x[1], reverse=True)[:10]\nprint(userMostSimDict[0]) # [(265, 0.32571146), (44, 0.31304061), (468, 0.29434624), (312, 0.28957412), (134, 0.28880805), (451, 0.282516), (201, 0.28025714), (367, 0.27581462), (216, 0.27418706), (63, 0.27265775)]\n\n##################################################################\n## ไธ‰: ๆŽจ่็”ตๅฝฑ\n# ๅพ—ๅˆฐไบ†ๆฏไธช็”จๆˆทๅฏนๅบ”็š„ 10 ไธชๅ…ด่ถฃๆœ€็›ธ่ฟ‘็š„็”จๆˆทไน‹ๅŽ, ๆˆ‘ไปฌๆ นๆฎไธ‹้ข็š„ๅ…ฌๅผ่ฎก็ฎ—็”จๆˆทๅฏนๆฏไธชๆฒกๆœ‰่ง‚็œ‹่ฟ‡็š„็”ตๅฝฑ็š„ๅ…ด่ถฃๅˆ†:\n# ็”จ่ฟ™ K ไธช็”จๆˆท็š„ๅ–œๅฅฝไธญ็›ฎๆ ‡็”จๆˆทๆฒกๆœ‰็œ‹่ฟ‡็š„็”ตๅฝฑ่ฟ›่กŒๆŽจ่\nuserRecommendValues = np.zeros((len(ratingValues), len(ratingValues[0])), dtype=np.float32) # 610 * 9005\nfor i in range(len(ratingValues)): # ่ฟ™ไธชๅพช็ŽฏไนŸๆŒบๆ…ข็š„, ๅคง็บฆ 3min\n for j in range(len(ratingValues[i])):\n if ratingValues[i][j] == 0:\n val = 0\n for (user, sim) in userMostSimDict[i]:\n val += (ratingValues[user][j] * sim) # user ็”จๆˆทๅฏน็ฌฌ j ้ƒจ็”ตๅฝฑ็š„ๆ‰“ๅˆ† * ็›ธไผผๅบฆ\n userRecommendValues[i, j] = val\nprint(userRecommendValues.max()) # 23.9331; ๅฏ่ƒฝๆœ€ๅŽ็š„ๅˆ†ๆ•ฐๅคงไบŽ 5\n\n# ๆŽฅไธ‹ๆฅไธบๆฏไธช็”จๆˆทๆŽจ่ 10 ้ƒจ็”ตๅฝฑ:\nuserRecommendDict = dict()\nfor i in range(len(ratingValues)):\n userRecommendDict[i] = sorted(enumerate(list(userRecommendValues[i])), key=lambda x: x[1], reverse=True)[:10]\nprint(userRecommendDict[0]) # [(497, 11.047806), (937, 9.8974247), (1185, 9.3699417), (879, 9.3593016), (1034, 9.3171129), (881, 9.312067), (643, 8.8374186), (1610, 8.502533), (837, 7.9373536), (1270, 7.7954793)]\n\n# ๅฐ†ไปฅไธŠไปฃ็ ไธญไฝฟ็”จ็š„็ดขๅผ•้ฆ–ๅ…ˆ่ฝฌๆขไธบ็”จๆˆท id ไธŽ็”ตๅฝฑ id, ๅ†ๅฐ†็”ตๅฝฑ id ่ฝฌๆขๆˆ็”ตๅฝฑๅ:\n# ๅฐ†ไธ€ๅผ€ๅง‹็š„็ดขๅผ•่ฝฌๆขไธบๅŽŸๆฅ็”จๆˆท id ไธŽ็”ตๅฝฑ id\nuserRecommendList = []\nfor key, value in userRecommendDict.items():\n user = usersMap[key]\n for (movieId, val) in value:\n userRecommendList.append([user, moviesMap[movieId]])\n\n# ๅฐ†ๆŽจ่็ป“ๆžœ็š„็”ตๅฝฑ id ่ฝฌๆขๆˆๅฏนๅบ”็š„็”ตๅฝฑๅ\nrecommendDF = pd.DataFrame(userRecommendList, columns=['userId', 'movieId'])\nrecommendDF = pd.merge(recommendDF, moviesDF[['movieId', 'title']], on='movieId', how='inner')\nprint(recommendDF.tail(10))\n\n##################################################################\n## Tips\n# UserCF ๅœจ็”จๆˆท็‰นๅพ็ปดๅบฆ่พƒ้ซ˜(ๆœฌๆ–‡ๆœ‰ 8981 ็ปด)็š„ๆƒ…ๅ†ตไธ‹, ็ฎ—ๆณ•ๆ•ˆ็އ่ฟ˜ๆ˜ฏๆŒบไฝŽ็š„\n", "sub_path": "bin/template/src/jptalgorithm/l104_UserCF.py", "file_name": "l104_UserCF.py", "file_ext": "py", "file_size_in_byte": 7201, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "numpy.random.seed", "line_number": 33, "usage_type": "call"}, {"api_name": 
"numpy.random", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pandas.set_option", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.pivot_table", "line_number": 56, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 125, "usage_type": "call"}]} {"seq_id": "168972782", "text": "import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plot\n\ndef converted(p):\n\tp[0]=p[0]/2\n\tp[1]=p[1]*255/100\n\tp[2]=p[2]*255/100\n\treturn np.array(p)\n\ndef imgNeg(image):\n\treturn 255-image\n\t\n\ncap=cv2.VideoCapture(0)\nwhile(1):\n\t#_,raw=cap.read()\n\t#daw=cv2.Canny(raw,100,250)\t#Intensity gradient-Min to max\n\t#hsv=cv2.cvtColor(raw, cv2.COLOR_BGR2HSV)\n\n\tim = cv2.imread('Pictures/Webcam/2015-05-18-035043.jpg')\n\timgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n\tthresh = cv2.threshold(imgray,127,255,0)\n\tcontours= cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\tim = cv2.drawContours(im, contours, -1, (0,255,0), 3)\n\n\t#canny=cv2.Canny(hsv,100,250)\n\t#canny=imgNeg(canny)\n\t\n\t#lower_white=[0,0,65]\t#Fine tuned values trained specifically to recognise my tshirt! 
145,23,12 also works!\n\t#upper_white=[360,22,100]\n\t#lower_blue=[209,30,10]\n\t#upper_blue=[265,90,97]\n\t#lower_red=[322,45,5]\n\t#upper_red=[360,80,90]\n\n\t#wmask=cv2.inRange(hsv,converted(lower_white),converted(upper_white))\n\t#wmask=cv2.medianBlur(wmask,5)\n\t#wmask=cv2.GaussianBlur(wmask,(5,5),0)\n\t\n\t#bmask=cv2.inRange(hsv,converted(lower_blue),converted(upper_blue))\n\t#rmask=cv2.inRange(hsv,converted(lower_red),converted(upper_red))\n\t#rmask=cv2.medianBlur(rmask,5)\n\t#daw=cv2.Canny(rmask,100,250)\n\t#whitened_mask=cv2.bitwise_and(raw,raw,mask=wmask)\n\t#blued_mask=cv2.bitwise_and(raw,raw,mask=bmask)\n\t#reddened_mask=cv2.bitwise_and(raw,raw,mask=rmask)\n\t#rmask=cv2.medianBlur(rmask,5)\n\n\t#gray=cv2.cvtColor(raw,cv2.COLOR_BGR2GRAY)\n\t#thresh=cv2.threshold(gray,127,255,0)\n\t#contours=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\t#raw=cv2.drawContours(raw,contours,-1,(0,255,0),3)\n\n\tcv2.imshow('Its such a good vibration!',raw)\n\t#cv2.imshow('whitened Mask',whitened_mask)\n\t#cv2.imshow('Canny',daw)\n\t#cv2.imshow('Reddened Mask',rmask)\n\t#cv2.imshow('Trackin \\'n Crackin\\'!',daw)\n\t#cv2.imshow('Blued Mask',blued_mask)\n\t#cv2.imshow('My Sketch!',canny)\n\tk=cv2.waitKey(5) & 0xff\n\tif k==27:\n\t\tbreak\ncv2.destroyAllWindows()\n", "sub_path": "Codes/IP3.py", "file_name": "IP3.py", "file_ext": "py", "file_size_in_byte": 1995, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 65, "usage_type": "call"}]} {"seq_id": "380761834", "text": "from unittest import mock\nfrom utils.komp2_api import get_allele2gene_map\nfrom utils.models import Allele\nimport pytest\nfrom sqlalchemy.exc import SQLAlchemyError\n\n\n@mock.patch('utils.komp2_api.Session')\ndef test_get_allele2gene_map_one_allele(mock_session):\n test_allele = Allele()\n test_allele.acc = \"MGI:4244706\"\n test_allele.gf_acc = \"MGI:1915571\"\n mock_session.return_value.query(Allele).all.return_value = [test_allele]\n allele_gene_map = get_allele2gene_map()\n assert(allele_gene_map[\"MGI:4244706\"] == \"MGI:1915571\")\n\n\n@mock.patch('utils.komp2_api.Session')\ndef test_get_allele2gene_map_no_alleles(mock_session):\n mock_session.return_value.query(Allele).all.return_value = []\n allele_gene_map = get_allele2gene_map()\n assert(allele_gene_map == {})\n\n\n@mock.patch('utils.komp2_api.Session')\ndef test_raises(mock_session):\n mock_session.return_value.query(Allele).all.side_effect = lambda: exec('raise(SQLAlchemyError(\"some info\"))')\n with pytest.raises(Exception) as excinfo:\n get_allele2gene_map()\n 
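    # the mocked query's side_effect raises SQLAlchemyError('some info') via the exec trick above,\n    # so get_allele2gene_map is expected to surface an exception carrying that same message\n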
    assert str(excinfo.value) == 'some info'\n", "sub_path": "tests/test_komp2_api.py", "file_name": "test_komp2_api.py", "file_ext": "py", "file_size_in_byte": 1089, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "utils.models.Allele", "line_number": 10, "usage_type": "call"}, {"api_name": "utils.models.Allele", "line_number": 13, "usage_type": "argument"}, {"api_name": "utils.komp2_api.get_allele2gene_map", "line_number": 14, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 8, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 8, "usage_type": "name"}, {"api_name": "utils.models.Allele", "line_number": 20, "usage_type": "argument"}, {"api_name": "utils.komp2_api.get_allele2gene_map", "line_number": 21, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 18, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 18, "usage_type": "name"}, {"api_name": "utils.models.Allele", "line_number": 27, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 28, "usage_type": "call"}, {"api_name": "utils.komp2_api.get_allele2gene_map", "line_number": 29, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 25, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 25, "usage_type": "name"}]}
{"seq_id": "356960646", "text": "import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton\nfrom PyQt5.QtWidgets import QLCDNumber, QLabel, QLineEdit\nfrom PyQt5.QtGui import QPixmap\n\nclass Example(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.initUI()\n\n    def initUI(self):\n\n        self.setGeometry(300, 300, 300, 400)\n        self.setWindowTitle('Img')\n\n        self.btn = QPushButton('Show', self)\n        self.btn.resize(self.btn.sizeHint())\n        self.btn.move(100, 80)\n        self.btn.clicked.connect(self.hello)\n\n\n        # _________input1____________________#\n        self.name_label = QLabel(self)\n        self.name_label.setText(\"File name: \")\n        self.name_label.move(40, 50)\n\n        self.name_input = QLineEdit(self)\n        self.name_input.move(130, 50)\n\n        self.pix_label = QLabel(self)\n        self.pix_label.setText(\"testtesttesttesttesttesttesttest \\n srfgwrfgwerf\")\n        self.pix_label.setGeometry(40, 150, 200, 200)\n\n    def hello(self):\n        a = self.name_input.text()\n        pixmap = QPixmap(a)\n        self.pix_label.setPixmap(pixmap)\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ex = Example()\n    ex.show()\n    sys.exit(app.exec())\n", "sub_path": "QT/002.py", "file_name": "002.py", "file_ext": "py", "file_size_in_byte": 1229, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 6, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 16, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 27, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 30, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 36, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 44, "usage_type": "call"}]}
{"seq_id": "129734923", "text": "# SJSU CMPE 138Spring2021TEAM8\nimport
simplejson as json\nfrom flask import Flask, make_response, jsonify, Blueprint, request\nfrom flask_mysql_connector import MySQL\nfrom flask import current_app\nimport random\nmysql = MySQL()\n\nanimals_api = Blueprint('animals_api', __name__)\n\n\n@animals_api.route('/animals', methods=['GET', 'POST', 'DELETE', 'PUT'])\ndef animals():\n if request.method == 'GET':\n try:\n conn = mysql.connection\n cursor = conn.cursor(dictionary=True)\n if 'species_id' in request.args:\n get_animal = \"\"\"SELECT * FROM Species \\\n WHERE species_id = %s\"\"\"\n cursor.execute(get_animal, (request.args.get('species_id'),))\n current_app.logger.info(\"Fetching species %s\",request.args.get('species_id'))\n rows = cursor.fetchone()\n else:\n get_animal = \"SELECT * FROM Species\"\n cursor.execute(get_animal)\n current_app.logger.info(\"Fetching all species in the park\")\n rows = cursor.fetchall()\n return make_response(json.dumps(rows), 200)\n except Exception as e:\n current_app.logger.error(e)\n return make_response(\"false\", 500)\n finally:\n cursor.close()\n conn.close()\n if request.method == 'POST':\n try:\n body = request.json\n post_animal = \"\"\"INSERT INTO Species (species_id,name,age,description,gender,category)\n VALUES ( %s, %s, %s, %s, %s, %s )\"\"\"\n species_id =random.randint(100, 999999)\n data = (\n species_id, body['name'], body['age'], body['description'], body['gender'], body['category'])\n conn = mysql.connection\n cursor = conn.cursor(dictionary=True)\n cursor.execute(post_animal, data)\n current_app.logger.info(\"Inserting species %s\", body['name'])\n conn.commit()\n return make_response(\"true\", 200)\n except Exception as e:\n current_app.logger.error(e)\n return make_response(\"false\", 500)\n finally:\n cursor.close()\n conn.close()\n if request.method == 'DELETE':\n try:\n delete_species = \"\"\"DELETE FROM Species where species_id = %s\"\"\"\n data = (request.args.get('species_id'),)\n conn = mysql.connection\n cursor = conn.cursor(dictionary=True)\n cursor.execute(delete_species, data)\n current_app.logger.info(\"Delete species %s\", request.args.get('species_id'))\n conn.commit()\n return make_response(\"true\", 200)\n except Exception as e:\n current_app.logger.error(e)\n return make_response(\"false\", 500)\n finally:\n cursor.close()\n conn.close()\n if request.method == 'PUT':\n try:\n body = request.json\n update_employee = \"\"\"UPDATE Species set species_id = %s,name = %s,age= %s, description= %s,gender= %s,\n category = %s WHERE species_id = %s\"\"\"\n data = (\n body['species_id'], body['name'], body['age'], body['description'], body['gender'], body['category'],\n body['species_id'])\n conn = mysql.connection\n cursor = conn.cursor(dictionary=True)\n cursor.execute(update_employee, data)\n current_app.logger.info(\"Update species %s\", body['name'])\n conn.commit()\n return make_response(\"true\", 200)\n except Exception as e:\n current_app.logger.error(e)\n return make_response(\"false\", 500)\n finally:\n cursor.close()\n conn.close()\n\n\n@animals_api.route('/animals-stats', methods=['GET'])\ndef getStats():\n try:\n conn = mysql.connection\n cursor = conn.cursor(dictionary=True, buffered=True)\n stats = {}\n count_query = \"SELECT COUNT(*) as total_count FROM Species;\"\n cursor.execute(count_query)\n row = cursor.fetchone()\n stats['total_count'] = row['total_count']\n\n gender_query = \"\"\"SELECT gender, COUNT(*) as count\n FROM Species\n GROUP BY gender;\"\"\"\n cursor.execute(gender_query)\n rows = cursor.fetchall()\n stats['gender_stats'] = rows\n\n 
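# For reference (illustrative values only, not pulled from a real database), the stats\n        # payload assembled in this handler ends up shaped like:\n        # {\"total_count\": 42, \"gender_stats\": [{\"gender\": \"male\", \"count\": 22}, ...],\n        #  \"category_stats\": [{\"category\": \"mammal\", \"count\": 10}, ...], \"age_stats\": [...]}\n        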
category_query = \"\"\"SELECT category, COUNT(*) as count\n FROM Species\n GROUP BY category;\"\"\"\n cursor.execute(category_query)\n rows = cursor.fetchall()\n stats['category_stats'] = rows\n\n age_query = \"\"\"SELECT SUM(CASE WHEN age < 10 THEN 1 ELSE 0 END) AS 'Under 10',\n SUM(CASE WHEN age BETWEEN 11 AND 20 THEN 1 ELSE 0 END) AS '11-20',\n SUM(CASE WHEN age BETWEEN 21 AND 30 THEN 1 ELSE 0 END) AS '21-30',\n SUM(CASE WHEN age > 30 THEN 1 ELSE 0 END) AS '30 And More'\n FROM Species\"\"\"\n cursor.execute(age_query)\n rows = cursor.fetchall()\n stats['age_stats'] = rows\n return make_response(json.dumps(stats), 200)\n except Exception as e:\n print(e)\n return make_response(\"false\", 500)\n", "sub_path": "APPLICATION/backend/animal.py", "file_name": "animal.py", "file_ext": "py", "file_size_in_byte": 5275, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "flask_mysql_connector.MySQL", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.Blueprint", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 29, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.current_app.logger.error", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.current_app.logger.info", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 49, 
"usage_type": "call"}, {"api_name": "flask.current_app.logger.error", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.current_app.logger.error", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.current_app.logger.error", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 127, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 127, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 130, "usage_type": "call"}]} {"seq_id": "179965739", "text": "import requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport json\r\n\r\nimport email_sender\r\n\r\nCOL_title = 'Title'\r\nCOL_rating = 'Rating'\r\nCOL_review_count = 'Reviews'\r\nCOL_taste_profile = 'Taste Profile'\r\nCOL_taste_profile_order = 'Taste Profile Order'\r\nCOL_image_url_and_shop_url = 'Wine'\r\nCOL_image_url_and_shop_url_separator = '||'\r\nCOL_price = '$ Normal'\r\nCOL_discounted_price = '$ Discount'\r\nCOL_final_discount_percent = '% Discount'\r\nCOL_offer_type = 'Offer'\r\n\r\n\r\ndef get_number_pages(rode_wijn_page):\r\n li_wijn_rood = rode_wijn_page.find(\"li\", {\"class\": \"wijn-rood\"})\r\n quantity_span = 
li_wijn_rood.find('span').find('span')\r\n quantity_rode_wijn = quantity_span.string[1:-1]\r\n pages_qty = int(int(quantity_rode_wijn) / 12)\r\n return pages_qty\r\n\r\n\r\ndef get_wijn_df_columns():\r\n return [COL_title, COL_rating, COL_review_count, COL_taste_profile,\r\n COL_image_url_and_shop_url, COL_price, COL_discounted_price,\r\n COL_final_discount_percent, COL_offer_type]\r\n\r\n\r\ndef new_wijn_df_row(title, rating, review_count, taste_profile, image_url, price, discounted_price,\r\n offer_type):\r\n\r\n final_discounted_percent = round(((1 - float(discounted_price) / float(price)) * 100), 2)\r\n\r\n return [title, rating, review_count, taste_profile, image_url, price, discounted_price,\r\n final_discounted_percent,offer_type]\r\n\r\n\r\ndef parse_wiijn_row_from_article_tag(article):\r\n img = article.find('img',{'itemprop':'image'})\r\n\r\n title = img['alt']\r\n rating = article.find('ul',{'class':'rating-box'})['data-rating']\r\n try:\r\n h4 = article.find('h4')\r\n if h4:\r\n review_count = article.find('h4').contents[0]\r\n else:\r\n review_count = 0\r\n except:\r\n review_count = -2\r\n\r\n try:\r\n all_span_title = article.findAll('span',{'class':'title'})\r\n if len(all_span_title) > 1:\r\n taste_profile = all_span_title[1].string\r\n elif len(all_span_title) == 1:\r\n taste_profile = all_span_title[0].string\r\n else:\r\n taste_profile = 'not found'\r\n except:\r\n taste_profile = 'error'\r\n\r\n image_url = img['src'] + COL_image_url_and_shop_url_separator + 'https://www.gall.nl' + \\\r\n article.find('a')['href']\r\n price = json.loads(article['data-tracking-impression'])['price']\r\n\r\n sticker_title = article.find('div', {'class': 'sticker-title'})\r\n sticker_text = article.find('div',{'class':'sticker-text'})\r\n\r\n offer_type = \"\"\r\n if sticker_title:\r\n offer_type += str(sticker_title)\r\n\r\n if sticker_text:\r\n offer_type += str(sticker_text)\r\n\r\n if title.startswith('La Palma Ca') > 0:\r\n str(1)\r\n\r\n price_item_active = article.find('span', {'class': 'price item active'})\r\n span_price_discount = article.find('span', {'class': 'price'})\r\n\r\n if price_item_active:\r\n discounted_price = round(float(price_item_active.text),2)\r\n elif '2 halen, 1 betalen' in offer_type or '50% korting' in offer_type:\r\n discounted_price = round(float(price) / 2,2)\r\n elif '2e halve prijs' in offer_type:\r\n discounted_price = round(((float(price) / 2) + float(price)) / 2, 2)\r\n elif span_price_discount:\r\n discounted_price = round(float(span_price_discount.text), 2)\r\n else:\r\n discounted_price = price\r\n\r\n article.find('span',{'class':'price item active'})\r\n\r\n return new_wijn_df_row(\r\n title,\r\n rating,\r\n review_count,\r\n taste_profile,\r\n image_url,\r\n price,\r\n discounted_price,\r\n offer_type\r\n )\r\n\r\n\r\ndef get_wiijn_rows_from_page(rode_wijn_page):\r\n products_grid = rode_wijn_page.find('div', {'class': 'row products-grid'})\r\n articles = products_grid.find_all('article')\r\n wijn_list = []\r\n for article in articles:\r\n wijn_list.append(parse_wiijn_row_from_article_tag(article))\r\n return wijn_list\r\n\r\ndef get_html_style(table_html):\r\n return \"\"\"\r\n \r\n \r\n html title\r\n \r\n \r\n \r\n \"\"\" + table_html+''\r\n\r\ndef get_rode_wijn_df():\r\n URL_template = 'https://www.gall.nl/shop/acties/?filter=categorie%3Dwijn%2Frode-wijn&page={}'\r\n\r\n wijn_list = []\r\n\r\n url = URL_template.format(1)\r\n\r\n response = requests.get(url)\r\n rode_wijn_page = BeautifulSoup(response.text, 'html.parser')\r\n\r\n pages_qty 
= get_number_pages(rode_wijn_page)\r\n print(str(pages_qty) + ' pages!')\r\n wiijn_rows = get_wiijn_rows_from_page(rode_wijn_page)\r\n wijn_list.extend(wiijn_rows)\r\n\r\n for i in range(2, pages_qty + 1):\r\n url = URL_template.format(i)\r\n response = requests.get(url)\r\n rode_wijn_page = BeautifulSoup(response.text, 'html.parser')\r\n wijn_list.extend(get_wiijn_rows_from_page(rode_wijn_page))\r\n print('Retrieved page ' + str(i))\r\n\r\n wijn_df = pd.DataFrame(wijn_list, columns=get_wijn_df_columns())\r\n return wijn_df\r\n\r\ndef get_rode_wijn_html(wijn_df):\r\n taste_profile_order = {'Royal Red': 1,\r\n 'Big Red': 2,\r\n 'Classic Red': 3,\r\n 'Easy Rider': 4,\r\n 'Fresh Red': 5,\r\n 'Fruit Smoothie': 6}\r\n\r\n wijn_df[COL_taste_profile_order] = wijn_df.apply(\r\n lambda row: taste_profile_order.setdefault(row[COL_taste_profile], 100), axis=1)\r\n wijn_df.sort_values(by=[COL_taste_profile_order, COL_rating,COL_review_count,COL_discounted_price], ascending=[True, False,False,True], inplace=True)\r\n wijn_df_modif_index = wijn_df.set_index([COL_taste_profile, COL_title])\r\n\r\n\r\n def format_img(urls):\r\n urls_array = urls.split(COL_image_url_and_shop_url_separator)\r\n img_url = urls_array[0]\r\n shop_url = urls_array[1]\r\n return ''.format(shop_url, img_url)\r\n\r\n def format_rating(x):\r\n try:\r\n return '{0:.2f}'.format(float(x))\r\n except:\r\n return '??? ' + str(x)\r\n\r\n columns_to_html = [COL_rating, COL_review_count, COL_discounted_price, COL_price, COL_final_discount_percent,\r\n COL_image_url_and_shop_url,COL_offer_type]\r\n\r\n pd.set_option('display.max_colwidth', -1)\r\n table_html = wijn_df_modif_index.to_html(columns=columns_to_html,\r\n escape=False,\r\n formatters={\r\n COL_image_url_and_shop_url: format_img,\r\n COL_rating: format_rating,\r\n COL_final_discount_percent: lambda x: '{0:.0f}%'.format(x),\r\n COL_discounted_price: lambda x: '${}'.format(x),\r\n COL_price: lambda x: '${}'.format(x),\r\n })\r\n\r\n return get_html_style(table_html)", "sub_path": "rode_wijn_web_scrapper.py", "file_name": "rode_wijn_web_scrapper.py", "file_ext": "py", "file_size_in_byte": 8363, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "json.loads", "line_number": 71, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 170, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 171, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 180, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 181, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 185, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 217, "usage_type": "call"}]} {"seq_id": "386941201", "text": "import matplotlib.pyplot as plt\nimport pandas as pd\nfrom wordcloud import WordCloud\nimport ast\n\n\ndef polarity_distribution(df, file_name,title):\n \"\"\"\n Plot distribution of polarity\n Parameters\n ----------\n df: pd.DataFrame\n file_name: str\n title: str\n\n Returns\n -------\n None\n \"\"\"\n save_path = '../reports/figures/'\n if 'sentiment_target' in df.columns: \n plt.figure(figsize=(20,15))\n df_pos = df['sum_polarity_unique_emoji'][df['sentiment_target']=='positive']\n df_neg = df['sum_polarity_unique_emoji'][df['sentiment_target']=='negative']\n plt.hist(df_pos,bins = 20,label='positive')\n plt.hist(df_neg,bins = 20,label='negative')\n plt.title(title,fontsize=25)\n plt.xlabel('Sum of polarity',fontsize=15)\n 
plt.ylabel('Counts',fontsize=15)\n plt.legend()\n else:\n plt.figure(figsize=(20,15))\n plt.hist(df['sum_polarity_unique_emoji'],bins=40)\n plt.title(title,fontsize=25)\n plt.xlabel('Sum of polarity',fontsize=15)\n plt.ylabel('Counts',fontsize=15)\n plt.savefig(save_path+file_name)\n \ndef target_distribution(df,file_name,title):\n \"\"\"\n Plot target distribution for classes\n Parameters\n ----------\n df: pd.DataFrame\n file_name: str\n title: str\n\n Returns\n -------\n None\n \"\"\"\n save_path = '../reports/figures/'\n plt.figure(figsize=(15,10))\n counts = df['sentiment_target'].value_counts()\n plt.bar(counts.index, counts.values)\n plt.title(title,fontsize=25)\n plt.xlabel('Target',fontsize=15)\n plt.ylabel('Counts',fontsize=15)\n plt.savefig(save_path+file_name)\n\ndef word_cloud(df,emotion):\n \"\"\"\n plot word cloud for the most important 100 words\n Parameters\n ----------\n df: pd.DataFrame\n emotion: str\n\n Returns\n -------\n\n \"\"\"\n save_path = '../reports/figures/'\n connect_word_in_tweet = []\n df_neg = df[df['sentiment_target']==emotion]\n for case in df_neg['tokens']:\n x = ast.literal_eval(case)\n x = ' '.join(x)\n connect_word_in_tweet.append(x)\n text = ' '.join(connect_word_in_tweet)\n text = text.replace('-pron-','')\n wordcloud = WordCloud(max_words=100,width=1920, height=1080).generate(text)\n plt.figure( figsize=(20,10) )\n plt.title(emotion)\n plt.imshow(wordcloud, interpolation=\"bilinear\")\n plt.savefig(save_path + 'wordcloud_' + emotion + '.png', facecolor='k', bbox_inches='tight')", "sub_path": "src/visualization/.ipynb_checkpoints/plot-checkpoint.py", "file_name": "plot-checkpoint.py", "file_ext": "py", "file_size_in_byte": 2470, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.ylabel", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "ast.literal_eval", "line_number": 77, "usage_type": "call"}, {"api_name": "wordcloud.WordCloud", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}]} {"seq_id": "485844169", "text": "# Copyright (C) 2018 Bloomberg LP\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# \n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nOperationsService\n=================\n\n\"\"\"\n\nimport logging\n\nimport grpc\n\nfrom google.protobuf.empty_pb2 import Empty\n\nfrom buildgrid._exceptions import InvalidArgumentError\nfrom buildgrid._protos.google.longrunning import operations_pb2_grpc, operations_pb2\nfrom buildgrid.server._authentication import AuthContext, authorize\n\n\nclass OperationsService(operations_pb2_grpc.OperationsServicer):\n\n def __init__(self, server):\n self.__logger = logging.getLogger(__name__)\n\n self._instances = {}\n\n operations_pb2_grpc.add_OperationsServicer_to_server(self, server)\n\n # --- Public API ---\n\n def add_instance(self, instance_name, instance):\n \"\"\"Registers a new servicer instance.\n\n Args:\n instance_name (str): The new instance's name.\n instance (OperationsInstance): The new instance itself.\n \"\"\"\n self._instances[instance_name] = instance\n\n # --- Public API: 
Servicer ---\n\n @authorize(AuthContext)\n def GetOperation(self, request, context):\n self.__logger.debug(\"GetOperation request from [%s]\", context.peer())\n\n try:\n name = request.name\n\n instance_name = self._parse_instance_name(name)\n instance = self._get_instance(instance_name)\n\n operation_name = self._parse_operation_name(name)\n operation = instance.get_operation(operation_name)\n op = operations_pb2.Operation()\n op.CopyFrom(operation)\n op.name = name\n return op\n\n except InvalidArgumentError as e:\n self.__logger.error(e)\n context.set_details(str(e))\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n\n return operations_pb2.Operation()\n\n @authorize(AuthContext)\n def ListOperations(self, request, context):\n self.__logger.debug(\"ListOperations request from [%s]\", context.peer())\n\n try:\n # The request name should be the collection name\n # In our case, this is just the instance_name\n instance_name = request.name\n instance = self._get_instance(instance_name)\n\n result = instance.list_operations(request.filter,\n request.page_size,\n request.page_token)\n\n for operation in result.operations:\n operation.name = \"{}/{}\".format(instance_name, operation.name)\n\n return result\n\n except InvalidArgumentError as e:\n self.__logger.error(e)\n context.set_details(str(e))\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n\n return operations_pb2.ListOperationsResponse()\n\n @authorize(AuthContext)\n def DeleteOperation(self, request, context):\n self.__logger.debug(\"DeleteOperation request from [%s]\", context.peer())\n\n try:\n name = request.name\n\n instance_name = self._parse_instance_name(name)\n instance = self._get_instance(instance_name)\n\n operation_name = self._parse_operation_name(name)\n instance.delete_operation(operation_name)\n\n except InvalidArgumentError as e:\n self.__logger.error(e)\n context.set_details(str(e))\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n\n return Empty()\n\n @authorize(AuthContext)\n def CancelOperation(self, request, context):\n self.__logger.debug(\"CancelOperation request from [%s]\", context.peer())\n\n try:\n name = request.name\n\n instance_name = self._parse_instance_name(name)\n instance = self._get_instance(instance_name)\n\n operation_name = self._parse_operation_name(name)\n instance.cancel_operation(operation_name)\n\n except InvalidArgumentError as e:\n self.__logger.error(e)\n context.set_details(str(e))\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n\n return Empty()\n\n # --- Private API ---\n\n def _parse_instance_name(self, name):\n \"\"\" If the instance name is not blank, 'name' will have the form\n {instance_name}/{operation_uuid}. 
Otherwise, it will just be\n {operation_uuid} \"\"\"\n names = name.split('/')\n return '/'.join(names[:-1]) if len(names) > 1 else ''\n\n def _parse_operation_name(self, name):\n names = name.split('/')\n return names[-1] if len(names) > 1 else name\n\n def _get_instance(self, name):\n try:\n return self._instances[name]\n\n except KeyError:\n raise InvalidArgumentError(\"Instance doesn't exist on server: [{}]\".format(name))\n", "sub_path": "buildgrid/server/operations/service.py", "file_name": "service.py", "file_ext": "py", "file_size_in_byte": 5265, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "buildgrid._protos.google.longrunning.operations_pb2_grpc.OperationsServicer", "line_number": 33, "usage_type": "attribute"}, {"api_name": "buildgrid._protos.google.longrunning.operations_pb2_grpc", "line_number": 33, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 36, "usage_type": "call"}, {"api_name": "buildgrid._protos.google.longrunning.operations_pb2_grpc.add_OperationsServicer_to_server", "line_number": 40, "usage_type": "call"}, {"api_name": "buildgrid._protos.google.longrunning.operations_pb2_grpc", "line_number": 40, "usage_type": "name"}, {"api_name": "buildgrid._protos.google.longrunning.operations_pb2.Operation", "line_number": 67, "usage_type": "call"}, {"api_name": "buildgrid._protos.google.longrunning.operations_pb2", "line_number": 67, "usage_type": "name"}, {"api_name": "buildgrid._exceptions.InvalidArgumentError", "line_number": 72, "usage_type": "name"}, {"api_name": "grpc.StatusCode", "line_number": 75, "usage_type": "attribute"}, {"api_name": "buildgrid._protos.google.longrunning.operations_pb2.Operation", "line_number": 77, "usage_type": "call"}, {"api_name": "buildgrid._protos.google.longrunning.operations_pb2", "line_number": 77, "usage_type": "name"}, {"api_name": "buildgrid.server._authentication.authorize", "line_number": 55, "usage_type": "call"}, {"api_name": "buildgrid.server._authentication.AuthContext", "line_number": 55, "usage_type": "argument"}, {"api_name": "buildgrid._exceptions.InvalidArgumentError", "line_number": 98, "usage_type": "name"}, {"api_name": "grpc.StatusCode", "line_number": 101, "usage_type": "attribute"}, {"api_name": "buildgrid._protos.google.longrunning.operations_pb2.ListOperationsResponse", "line_number": 103, "usage_type": "call"}, {"api_name": "buildgrid._protos.google.longrunning.operations_pb2", "line_number": 103, "usage_type": "name"}, {"api_name": "buildgrid.server._authentication.authorize", "line_number": 79, "usage_type": "call"}, {"api_name": "buildgrid.server._authentication.AuthContext", "line_number": 79, "usage_type": "argument"}, {"api_name": "buildgrid._exceptions.InvalidArgumentError", "line_number": 118, "usage_type": "name"}, {"api_name": "grpc.StatusCode", "line_number": 121, "usage_type": "attribute"}, {"api_name": "google.protobuf.empty_pb2.Empty", "line_number": 123, "usage_type": "call"}, {"api_name": "buildgrid.server._authentication.authorize", "line_number": 105, "usage_type": "call"}, {"api_name": "buildgrid.server._authentication.AuthContext", "line_number": 105, "usage_type": "argument"}, {"api_name": "buildgrid._exceptions.InvalidArgumentError", "line_number": 138, "usage_type": "name"}, {"api_name": "grpc.StatusCode", "line_number": 141, "usage_type": "attribute"}, {"api_name": "google.protobuf.empty_pb2.Empty", "line_number": 143, "usage_type": "call"}, {"api_name": 
"buildgrid.server._authentication.authorize", "line_number": 125, "usage_type": "call"}, {"api_name": "buildgrid.server._authentication.AuthContext", "line_number": 125, "usage_type": "argument"}, {"api_name": "buildgrid._exceptions.InvalidArgumentError", "line_number": 163, "usage_type": "call"}]} {"seq_id": "489081499", "text": "import json\nimport dataClasses\n\ndef readRaw(filename):\n infile = open(filename,\"r\")\n indata = infile.read()\n infile.close()\n return indata\n\ndef makeEvents(rawdata):\n ou = []\n for x in rawdata[\"EVENTS\"]:\n #x is an event structure\n ev = dataClasses.data_event()\n ev.fromDict(x)\n ou.append(ev)\n return ou\n\ndef read(filename):\n raw = readRaw(filename)\n st = json.loads(raw)\n del(raw)\n osc_meta = st[\"OSC_META\"]\n ev_list = makeEvents(st)\n del(st)\n return [osc_meta,ev_list]\n", "sub_path": "read_ord_json_clump.py", "file_name": "read_ord_json_clump.py", "file_ext": "py", "file_size_in_byte": 539, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "dataClasses.data_event", "line_number": 14, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}]} {"seq_id": "31211470", "text": "\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom numpy.fft import fft, fftfreq, ifft\n\nlx = 100 # time in seconds\nn = 1000 # number of points\n\nx = np.linspace(1, lx, n) # Time\n\nomg = 2.0*np.pi/lx\n\ny1 = 3.0*np.cos(5.0*omg*x)\ny2 = 2.0*np.sin(10.0*omg*x)\ny3 = 1.0*np.sin(20.0*omg*x)\n\ny = y3+y2+y1\n\nplt.figure(1)\nplt.plot(x, y, '-b')\n# plt.show()\n\nffts = fftfreq(n)\nfftvals = fft(y)\n\n#further processing\nmask = ffts > 0\nffttheo = 2.0*abs(fftvals/n)\n\n\nplt.figure(2)\nplt.plot(ffts, fftvals, '-g')\n# plt.plot(ffts[mask], ffttheo[mask], '-g')\nplt.show()", "sub_path": "tutorial2_revamp/NUMPY/tut8_FFT.py", "file_name": "tut8_FFT.py", "file_ext": "py", "file_size_in_byte": 553, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "numpy.linspace", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.fft.fftfreq", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}]} {"seq_id": "430291563", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport requests\nimport lxml\nheaders = {\n \"User-Agent\": 
\"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 \"\n \"(KHTML, like Gecko) Chrome/51.0.2704.63 Safari/537.36\"\n }\nurl = \"http://go.hao123.com\"\nres= requests.get(url,headers=headers)\nhtml = lxml.etree.HTML(res.text)\ntejia =html.xpath(\"//div[@class='tejia-air-wrapper']\")#ๅ…ˆๆ‰พๅˆฐๅไธชๅŸŽๅธ‚\ntejias =tejia[0].xpath(\".//div[@class='tejia-air']\")\nfile = open(\"tejia-air.md\",\"w\")\nfor tj in tejias:\n aa = tj.xpath(\".//a[@class='flight']\")\n for a in aa:\n fromto = a.xpath(\"./@alog-text\")\n \n price = a.xpath(\".//span[@class ='tickets-price']/text()\")\n file.write(fromto[0]+price[0]+' ')\n print(fromto,price)\nfile.close()", "sub_path": "Tejia_air.py", "file_name": "Tejia_air.py", "file_ext": "py", "file_size_in_byte": 829, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 15, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 15, "usage_type": "attribute"}]} {"seq_id": "61359737", "text": "from __future__ import division\n\nimport numpy as np\nimport operator\nimport scipy.sparse as sp\nfrom numpy.testing import assert_raises\nfrom pyoperators import SparseOperator\nfrom pyoperators.utils.testing import assert_same\n\nA = np.array([[1, 0, 2, 0],\n [0, 0, 3, 0],\n [4, 5, 6, 0],\n [1, 0, 0, 1]])\nvecs = [[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]]\nformats = 'bsr,csr,csc,coo,dia,dok'.split(',')\n\n\ndef test_matvec():\n def func(format):\n cls = getattr(sp, format + '_matrix')\n so = SparseOperator(cls(A))\n for vec in vecs:\n assert_same(so(vec), np.dot(A, vec))\n assert_same(so.T(vec), np.dot(A.T, vec))\n assert_same(so.todense(), A)\n assert_same(so.T.todense(), A.T)\n for format in formats:\n yield func, format\n\n\ndef test_shapes():\n def func(format):\n cls = getattr(sp, format + '_matrix')\n shapein = (2, 2)\n shapeout = (1, 4, 1)\n so = SparseOperator(cls(A), shapein=shapein, shapeout=shapeout)\n for vec in vecs:\n assert_same(so(np.reshape(vec, shapein)),\n np.dot(A, vec).reshape(shapeout))\n assert_same(so.T(np.reshape(vec, shapeout)),\n np.dot(A.T, vec).reshape(shapein))\n for format in formats:\n yield func, format\n\n\ndef test_update_output():\n def func(format):\n cls = getattr(sp, format + '_matrix')\n so = SparseOperator(cls(A))\n out = np.zeros(4, dtype=int)\n outT = np.zeros(4, dtype=int)\n for vec in vecs:\n so(vec, out, operation=operator.iadd)\n so.T(vec, outT, operation=operator.iadd)\n assert_same(out, np.sum(A, axis=1))\n assert_same(outT, np.sum(A, axis=0))\n for format in formats:\n yield func, format\n\n\ndef test_error1():\n values = (sp.lil_matrix(A), np.zeros((10, 4)),\n np.matrix(np.zeros((10, 4))), 3)\n\n def func(v):\n assert_raises(TypeError, SparseOperator, v)\n for v in values:\n yield func, v\n\n\ndef test_error2():\n def func(format):\n cls = getattr(sp, format + '_matrix')\n sm = cls(A)\n shapein = (2, 3)\n shapeout = (1, 4, 2)\n assert_raises(ValueError, SparseOperator, sm, shapein=shapein)\n assert_raises(ValueError, SparseOperator, sm, shapeout=shapeout)\n for format in formats:\n yield func, format\n\n\ndef test_error3():\n def func(format):\n cls = getattr(sp, format + '_matrix')\n sm = cls(A)\n so = SparseOperator(sm)\n out = np.zeros(4, dtype=int)\n assert_raises(ValueError, so, vecs[0], out, operation=operator.imul)\n for format in formats:\n yield func, format\n", "sub_path": "test/test_sparse.py", 
"file_name": "test_sparse.py", "file_ext": "py", "file_size_in_byte": 2758, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 23, "usage_type": "argument"}, {"api_name": "pyoperators.SparseOperator", "line_number": 24, "usage_type": "call"}, {"api_name": "pyoperators.utils.testing.assert_same", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 26, "usage_type": "call"}, {"api_name": "pyoperators.utils.testing.assert_same", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 27, "usage_type": "call"}, {"api_name": "pyoperators.utils.testing.assert_same", "line_number": 28, "usage_type": "call"}, {"api_name": "pyoperators.utils.testing.assert_same", "line_number": 29, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 36, "usage_type": "argument"}, {"api_name": "pyoperators.SparseOperator", "line_number": 39, "usage_type": "call"}, {"api_name": "pyoperators.utils.testing.assert_same", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 42, "usage_type": "call"}, {"api_name": "pyoperators.utils.testing.assert_same", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 44, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 51, "usage_type": "argument"}, {"api_name": "pyoperators.SparseOperator", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, {"api_name": "operator.iadd", "line_number": 56, "usage_type": "attribute"}, {"api_name": "operator.iadd", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pyoperators.utils.testing.assert_same", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 58, "usage_type": "call"}, {"api_name": "pyoperators.utils.testing.assert_same", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 59, "usage_type": "call"}, {"api_name": "scipy.sparse.lil_matrix", "line_number": 65, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 65, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.testing.assert_raises", "line_number": 69, "usage_type": "call"}, {"api_name": "pyoperators.SparseOperator", "line_number": 69, "usage_type": "argument"}, {"api_name": "scipy.sparse", "line_number": 76, "usage_type": "argument"}, {"api_name": "numpy.testing.assert_raises", "line_number": 80, "usage_type": "call"}, {"api_name": "pyoperators.SparseOperator", "line_number": 80, "usage_type": "argument"}, {"api_name": "numpy.testing.assert_raises", "line_number": 81, "usage_type": "call"}, {"api_name": "pyoperators.SparseOperator", "line_number": 81, "usage_type": "argument"}, {"api_name": "scipy.sparse", "line_number": 88, "usage_type": "argument"}, {"api_name": "pyoperators.SparseOperator", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 91, "usage_type": "call"}, 
{"api_name": "numpy.testing.assert_raises", "line_number": 92, "usage_type": "call"}, {"api_name": "operator.imul", "line_number": 92, "usage_type": "attribute"}]} {"seq_id": "643176201", "text": "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport os, sys\nCURRENT_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.dirname(CURRENT_DIR))\n\nfrom torch.autograd import Variable\nfrom torch.utils.data import TensorDataset,DataLoader,SubsetRandomSampler\nfrom embeddings.model import SNEmbedding,PoincareDistance\nfrom embeddings.rsgd import poincare_grad,RiemannianSGD,euclidean_retraction\n\nclass_path = 'class_dict.txt'\n\nsplit_folder = ('zeroshot/standard_split/AWA2/')\nsplit_version = 1\n \ndef get_features():\n \n train_split_f = open(split_folder+\"trainclasses\"+str(split_version)+\".txt\",'r')\n val_split_f = open(split_folder+\"valclasses\"+str(split_version)+\".txt\",'r')\n features_f = open(\"zeroshot/Animals_with_Attributes2/Features/ResNet101/AwA2-features.txt\",'r')\n labels_f = open(\"zeroshot/Animals_with_Attributes2/Features/ResNet101/AwA2-labels.txt\",'r')\n class_file = open(\"zeroshot/Animals_with_Attributes2/classes.txt\",\"r\")\n wordnet_file = open(\"wordnet_dict.txt\",\"r\")\n \n emb = SNEmbedding(1181,32,PoincareDistance,1.0)\n emb.load_state_dict(torch.load('mammals_32d.pth')['model'])\n for i in emb.parameters():\n weights = i\n \n all_classes = set([])\n train_classes = set([])\n val_classes = set([])\n ci_map = {}\n \n for line in wordnet_file:\n line = line[:-1].split(\",\")\n all_classes.add(line[1])\n wordnet_file = open(\"wordnet_dict.txt\",\"r\")\n\n c = 0\n for line in train_split_f:\n line = line[:-1]\n if line in all_classes:\n train_classes.add(line)\n ci_map[line] = c\n c += 1\n for line in val_split_f:\n line = line[:-1]\n if line in all_classes:\n val_classes.add(line)\n ci_map[line] = c\n c += 1\n\n class_index_map = {}\n index_class_map = {}\n emb_map = {}\n n = 0\n i_map = {}\n for line in class_file:\n line = line.strip().split(\"\\t\")\n if line[1] in train_classes or line[1] in val_classes:\n class_index_map[line[1]] = ci_map[line[1]]\n index_class_map[ci_map[line[1]]] = line[1]\n i_map[n] = ci_map[line[1]]\n n += 1\n\n for line in wordnet_file:\n line = line[:-1].split(\",\")\n if int(line[0])-1 in i_map:\n emb_map[class_index_map[line[1]]] = weights[int(line[3])].data\n\n train_features = []\n train_labels = []\n val_features = []\n val_labels = []\n\n for feature,label in zip(features_f,labels_f):\n label = int(label)-1\n if label in i_map:\n if i_map[label] in index_class_map.keys():\n if index_class_map[i_map[label]] in train_classes:\n train_features.append(feature[:-1].split(\" \"))\n train_labels.append(i_map[label])\n elif index_class_map[i_map[label]] in val_classes:\n val_features.append(feature[:-1].split(\" \"))\n val_labels.append(i_map[label])\n\n batch_size = 32\n train_features = np.array(train_features,dtype=np.float32)\n train_labels = np.array(train_labels,dtype=np.float32)\n val_features = np.array(val_features,dtype=np.float32)\n val_labels = np.array(val_labels,dtype=np.float32)\n trainset = TensorDataset(torch.FloatTensor(train_features),torch.LongTensor(train_labels))\n trainloader = DataLoader(trainset,batch_size=batch_size,shuffle=True)\n valset = TensorDataset(torch.FloatTensor(val_features),torch.LongTensor(val_labels))\n valloader = DataLoader(valset,batch_size=batch_size,shuffle=True)\n \n return 
trainloader,valloader,index_class_map,emb_map\n\nclass ImageEmbedding(nn.Module):\n def __init__(self,input_dim,output_dim,num_classes=39,layers=[],activation=F.relu):\n super(ImageEmbedding, self).__init__()\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.activation = activation\n self.num_classes = num_classes\n self.layers = []\n d = input_dim\n for h_dim in layers:\n self.layers.append(nn.Linear(d,h_dim))\n d = h_dim\n self.layers = nn.ModuleList(self.layers)\n self.output = nn.Linear(d,output_dim)\n self.distfn = PoincareDistance()\n self.softmax = nn.LogSoftmax(dim=1)\n\n def forward(self,x,emb_weights):\n for l in self.layers:\n x = l(x)\n x = self.activation(x)\n x = self.output(x)\n x = x/(torch.norm(x,dim=1).reshape([-1,1])+1e-6)\n stacked = torch.stack([x]*emb_weights.shape[1],1)\n dists = self.distfn(stacked,emb_weights)\n return self.softmax(-dists)\n\ndef train(model,emb_map,trainloader,valloader,lr=1e-3,n_epochs=5000,n_train=26,n_val=13,batch_size=32):\n \n train_emb = [torch.FloatTensor(emb_map[i]) for i in range(n_train)]\n train_emb = torch.stack(train_emb)\n val_emb = [torch.FloatTensor(emb_map[i]) for i in range(n_train,n_train+n_val)]\n val_emb = torch.stack(val_emb)\n \n optimizer = optim.Adam(model.parameters(),lr=lr)\n model_loss = nn.CrossEntropyLoss()\n \n for ep in range(n_epochs):\n print(\"Epoch # : \" + str(ep))\n num_train = 0.0\n train_loss = 0.0\n train_acc = 0.0\n\n train_batch_emb = torch.stack([train_emb]*batch_size)\n train_batch_emb = Variable(train_batch_emb,requires_grad=False).cuda()\n for x,y in trainloader:\n x = x.cuda()\n y = y.cuda()\n optimizer.zero_grad()\n if x.shape[0] == batch_size:\n preds = model(x,train_batch_emb)\n loss = model_loss(preds,y)\n _, preds = torch.max(preds.data,1)\n train_acc += torch.sum(preds==y.data)\n train_loss += loss.item()\n num_train += x.shape[0]\n loss.backward(retain_graph=True)\n # optimizer.step(lr=lr)\n optimizer.step()\n\n if ep % 1 == 0:\n print(\"Train loss : \" + str(train_loss/num_train))\n print(\"Train accuracy : \" + str(float(train_acc.item())/num_train))\n\n val_batch_emb = torch.stack([val_emb]*batch_size)\n val_batch_emb = Variable(val_batch_emb,requires_grad=False).cuda()\n num_val = 0.0\n val_loss = 0.0\n val_acc = 0.0\n for x,y in valloader:\n x = x.cuda()\n y = y.cuda()\n y = y-n_train\n if x.shape[0] != batch_size:\n temp_emb = Variable(torch.stack([val_emb]*x.shape[0]),requires_grad=False).cuda()\n preds = model(x,temp_emb)\n else:\n preds = model(x,val_batch_emb)\n loss = model_loss(preds,y)\n val_loss += loss.item()\n num_val += x.shape[0]\n _, preds = torch.max(preds.data,1)\n val_acc += torch.sum(preds==y.data)\n print(\"Validation loss : \" + str(val_loss/num_val))\n print(\"Validation accuracy : \" + str(float(val_acc.item())/num_val))\n\ntrainloader,valloader,index_hash,emb_map = get_features()\nmodel = ImageEmbedding(2048,32,layers=[256],num_classes=26).to('cuda')\ntrain(model,emb_map,trainloader,valloader,n_train=26)\n", "sub_path": "clustering.py", "file_name": "clustering.py", "file_ext": "py", "file_size_in_byte": 7321, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8,
"usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "embeddings.model.SNEmbedding", "line_number": 29, "usage_type": "call"}, {"api_name": "embeddings.model.PoincareDistance", "line_number": 29, "usage_type": "argument"}, {"api_name": "torch.load", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 94, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 96, "usage_type": "attribute"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 104, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 105, "usage_type": "attribute"}, {"api_name": "torch.nn.functional", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 114, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 117, "usage_type": "name"}, {"api_name": "embeddings.model.PoincareDistance", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 119, "usage_type": "name"}, {"api_name": "torch.norm", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 138, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 139, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 148, "usage_type": "call"}, 
{"api_name": "torch.max", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 186, "usage_type": "call"}]} {"seq_id": "427501547", "text": "\"\"\"empty message\n\nRevision ID: cffecda0acbb\nRevises: 70e1cf8d7a57\nCreate Date: 2019-06-06 10:41:47.728713\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"cffecda0acbb\"\ndown_revision = \"70e1cf8d7a57\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\n \"users\",\n sa.Column(\n \"keypair_seed\", sa.String(length=128), server_default=\"Null\", nullable=True\n ),\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(\"users\", \"keypair_seed\")\n # ### end Alembic commands ###\n", "sub_path": "client/database/migrations/versions/cffecda0acbb_.py", "file_name": "cffecda0acbb_.py", "file_ext": "py", "file_size_in_byte": 739, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 32, "usage_type": "name"}]} {"seq_id": "74233774", "text": "\"\"\"views for harshp\n\"\"\"\n\nfrom django.http import Http404\nfrom django.shortcuts import render_to_response\n\nfrom blog.models import BlogPost\nfrom stories.models import StoryPost\nfrom poems.models import Poem\nfrom articles.models import Article\nfrom lifeX.models import LifeXWeek\nfrom brainbank.models import BrainBankIdea\nfrom sitedata.social_meta import create_meta\n\n\ndef home(request):\n \"\"\"homepage for harshp.com\n\n show all the latest posts in different apps\n\n Args:\n request(HttpResponse)\n\n Returns:\n HttpResponse: 200 on success, 404 on error\n\n Raises:\n Http404: error retrieving posts\n \"\"\"\n try:\n blogs = BlogPost.objects.order_by('-published')[:3]\n stories = StoryPost.objects.order_by('-published')[:3]\n poems = Poem.objects.order_by('-published')[:3]\n articles = Article.objects.order_by('-published')[:3]\n brainbank_idea = BrainBankIdea.objects.latest('published')\n lifexweek = LifeXWeek.objects.latest('number')\n description = \"\"\"\n The personal website of Harshvardhan Pandit (coolharsh55)\"\"\"\n keywords = ['harshp.com', 'blog', 'stories', 'poems', ]\n meta = create_meta(\n 'harshp.com',\n description,\n keywords,\n url=request.build_absolute_uri(),\n )\n except (BlogPost.DoesNotExist,\n StoryPost.DoesNotExist,\n Poem.DoesNotExist,\n Article.DoesNotExist,\n LifeXWeek.DoesNotExist,\n BrainBankIdea.DoesNotExist):\n raise Http404('Error retrieving website data...')\n return 
render_to_response(\n 'harshp/index.html',\n {\n 'blogs': blogs,\n 'stories': stories,\n 'poems': poems,\n 'articles': articles,\n 'brainbank_idea': brainbank_idea,\n 'lifeXweek': lifexweek,\n 'meta': meta,\n }\n )\n\n\ndef changelog(request):\n \"\"\"changelog\n\n show the project changelog\n\n Args:\n request(HttpResponse)\n\n Returns:\n HttpResponse: 200 on success, 404 on error\n\n Raises:\n None\n \"\"\"\n return render_to_response('harshp/changelog.html')\n\n\ndef privacypolicy(request):\n \"\"\"privacy policy\n\n show the privacy policy for website\n\n Args:\n request(HttpResponse)\n\n Returns:\n HttpResponse: 200 on success, 404 on error\n\n Raises:\n None\n \"\"\"\n return render_to_response('harshp/privacypolicy.html')\n", "sub_path": "harshp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2474, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "blog.models.BlogPost.objects.order_by", "line_number": 31, "usage_type": "call"}, {"api_name": "blog.models.BlogPost.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "blog.models.BlogPost", "line_number": 31, "usage_type": "name"}, {"api_name": "stories.models", "line_number": 32, "usage_type": "name"}, {"api_name": "stories.models.StoryPost.objects.order_by", "line_number": 32, "usage_type": "call"}, {"api_name": "stories.models.StoryPost.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "stories.models.StoryPost", "line_number": 32, "usage_type": "name"}, {"api_name": "poems.models", "line_number": 33, "usage_type": "name"}, {"api_name": "poems.models.Poem.objects.order_by", "line_number": 33, "usage_type": "call"}, {"api_name": "poems.models.Poem.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "poems.models.Poem", "line_number": 33, "usage_type": "name"}, {"api_name": "articles.models", "line_number": 34, "usage_type": "name"}, {"api_name": "articles.models.Article.objects.order_by", "line_number": 34, "usage_type": "call"}, {"api_name": "articles.models.Article.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "articles.models.Article", "line_number": 34, "usage_type": "name"}, {"api_name": "brainbank.models.BrainBankIdea.objects.latest", "line_number": 35, "usage_type": "call"}, {"api_name": "brainbank.models.BrainBankIdea.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "brainbank.models.BrainBankIdea", "line_number": 35, "usage_type": "name"}, {"api_name": "lifeX.models.LifeXWeek.objects.latest", "line_number": 36, "usage_type": "call"}, {"api_name": "lifeX.models.LifeXWeek.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "lifeX.models.LifeXWeek", "line_number": 36, "usage_type": "name"}, {"api_name": "sitedata.social_meta.create_meta", "line_number": 40, "usage_type": "call"}, {"api_name": "blog.models.BlogPost.DoesNotExist", "line_number": 46, "usage_type": "attribute"}, {"api_name": "blog.models.BlogPost", "line_number": 46, "usage_type": "name"}, {"api_name": "stories.models.StoryPost.DoesNotExist", "line_number": 47, "usage_type": "attribute"}, {"api_name": "stories.models.StoryPost", "line_number": 47, "usage_type": "name"}, {"api_name": "poems.models.Poem.DoesNotExist", "line_number": 48, "usage_type": "attribute"}, {"api_name": "poems.models.Poem", "line_number": 48, "usage_type": "name"}, {"api_name": "articles.models.Article.DoesNotExist", "line_number": 49, "usage_type": "attribute"}, {"api_name": 
"articles.models.Article", "line_number": 49, "usage_type": "name"}, {"api_name": "lifeX.models.LifeXWeek.DoesNotExist", "line_number": 50, "usage_type": "attribute"}, {"api_name": "lifeX.models.LifeXWeek", "line_number": 50, "usage_type": "name"}, {"api_name": "brainbank.models.BrainBankIdea.DoesNotExist", "line_number": 51, "usage_type": "attribute"}, {"api_name": "brainbank.models.BrainBankIdea", "line_number": 51, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 52, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 53, "usage_type": "call"}, {"api_name": "stories.models", "line_number": 57, "usage_type": "name"}, {"api_name": "poems.models", "line_number": 58, "usage_type": "name"}, {"api_name": "articles.models", "line_number": 59, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 81, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 98, "usage_type": "call"}]} {"seq_id": "297261373", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\n\r\n# %% sklearn version: \"LDA\" algorithm using sklearn library\r\n\r\n\r\n\r\nimport numpy as np\r\nfrom sklearn.discriminant_analysis import \\\r\n LinearDiscriminantAnalysis\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n# %% part 1 - load dataset and print summary\r\n\r\ntraining_dataset = np.loadtxt('fld.txt', delimiter=',')\r\nX_training_dataset = training_dataset[::, :2]\r\ny_training_dataset = training_dataset[::, 2]\r\n\r\nX_class_0_idx = np.where(y_training_dataset == 0)\r\nX_class_1_idx = np.where(y_training_dataset == 1)\r\nX_class_0 = X_training_dataset[X_class_0_idx]\r\nX_class_1 = X_training_dataset[X_class_1_idx]\r\n\r\nprint('----------------------------------------------------------------------')\r\nprint('----------------------------------------------------------------------')\r\nprint('Summary of the Dataset:')\r\nprint('Class \"0\" info: Shape - Head - Tail')\r\nprint('Shape:', X_class_0.shape)\r\nprint('Head:\\n', X_class_0[:5, ::])\r\nprint('Tail:\\n', X_class_0[-5:, ::])\r\nprint('----------------------------------------------------------------------')\r\nprint('Class \"1\" info: Shape - Head - Tail')\r\nprint('Shape:', X_class_1.shape)\r\nprint('Head:\\n', X_class_1[:5, ::])\r\nprint('Tail:\\n', X_class_1[-5:, ::])\r\nprint('----------------------------------------------------------------------')\r\n\r\n# %% part 2 - perform Linear Discriminant Analysis (LDA)\r\n\r\nLDA_object = LinearDiscriminantAnalysis(store_covariance=True)\r\nLDA_object.fit(X_training_dataset, y_training_dataset)\r\nWeight_vector = LDA_object.coef_[0]; intercept = LDA_object.intercept_\r\nprint('----------------------------------------------------------------------')\r\nprint('LDA >> Slope and Intercept:')\r\nprint('Slope (of log-posterior of LDA) =', Weight_vector,\r\n ',\\nIntercept (of log-posterior of LDA) =', intercept)\r\nprint('----------------------------------------------------------------------')\r\n\r\npi_0 = len(X_class_0) / (len(X_class_1) + len(X_class_0))\r\npi_1 = len(X_class_1) / (len(X_class_1) + len(X_class_0))\r\nCst = np.log(pi_0/pi_1)\r\n# Note >> covariance_array-like of shape (n_features, n_features)\r\n# Note >> means_array-like of shape (n_classes, n_features)\r\nw = np.dot(np.linalg.inv(LDA_object.covariance_),\r\n (LDA_object.means_[0] - LDA_object.means_[1]))\r\nDC_term = Cst - 0.5 * np.dot((LDA_object.means_[0] + 
LDA_object.means_[1]).T, w)\r\n\r\n# %% part 3 - prediction\r\n\r\npredictions = (np.sign(np.dot(w, X_training_dataset.T) + DC_term) + 1) / 2\r\nerror_possibility_1 = sum(predictions != y_training_dataset)\r\nerror_possibility_2 = sum((1 - predictions) != y_training_dataset)\r\nrel_error_1 = error_possibility_1 / len(y_training_dataset)\r\nrel_error_2 = error_possibility_2 / len(y_training_dataset)\r\n\r\nif rel_error_1 < rel_error_2:\r\n final_predictions = predictions\r\nelse:\r\n final_predictions = 1 - predictions\r\n\r\nnum_preds_to_print = 20\r\nprint(f'Some of predictions are: [first {num_preds_to_print}]\\n',\r\n final_predictions[:num_preds_to_print])\r\nprint(f'Some of predictions are: [last {num_preds_to_print}]\\n',\r\n final_predictions[-num_preds_to_print:])\r\n\r\n# %% part 4 - error report\r\n\r\nerrorIndex = np.argwhere(final_predictions != y_training_dataset)\r\nerrorPts = X_training_dataset[errorIndex]\r\nerrorPts = np.squeeze(errorPts)\r\n\r\nprint('----------------------------------------------------------------------')\r\nprint('LDA >> Error:', 100 * min(rel_error_2, rel_error_1), '%.')\r\nprint('LDA >> LDA_object.score():',\r\n 100 * LDA_object.score(X_training_dataset, y_training_dataset), '%.')\r\nprint('----------------------------------------------------------------------')\r\nprint('----------------------------------------------------------------------')\r\n\r\n# %% part 5 - visualization\r\n\r\n# first plot\r\nfigure_width = 20\r\noriginal_data_linewidth = 5\r\nlegend_fontsize = 20\r\nplot_grid_option = False\r\n\r\n\r\nplt.figure(figsize=(figure_width, figure_width / 1.618))\r\nplt.scatter(X_class_0[:, 0],\r\n X_class_0[:, 1],\r\n c='r', marker='.',\r\n linewidths=original_data_linewidth)\r\nplt.scatter(X_class_1[:, 0],\r\n X_class_1[:, 1],\r\n c='b', marker='.',\r\n linewidths=original_data_linewidth)\r\n\r\nk0, k1 = 5, 3\r\nplt.plot([- k0 * w[0], k1 * w[0]],\r\n [-k0 * w[1], k1 * w[1]],\r\n 'g--', lw=5)\r\n\r\nplt.xlabel('first axis (x0)', size=legend_fontsize)\r\nplt.ylabel('second axis (x1)', size=legend_fontsize)\r\nplt.legend(['LDA line', 'original data - class 0', 'original data - class 1'],\r\n fontsize=legend_fontsize)\r\n\r\nplt.savefig('sklearn-improved-img-1.png', dpi=300)\r\nplt.grid(True)\r\nplt.savefig('sklearn-improved-img-1-grid.png', dpi=300)\r\nplt.show()\r\n\r\n# second plot\r\nplt.figure(figsize=(figure_width, figure_width / 1.618))\r\nplt.scatter(X_class_0[:, 0],\r\n X_class_0[:, 1],\r\n c='r', marker='.',\r\n linewidths=original_data_linewidth)\r\nplt.scatter(X_class_1[:, 0],\r\n X_class_1[:, 1],\r\n c='b', marker='.',\r\n linewidths=original_data_linewidth)\r\n\r\nk0, k1 = 5, 3\r\nplt.plot([- k0 * w[0], k1 * w[0]],\r\n [-k0 * w[1], k1 * w[1]],\r\n 'g--', lw=5)\r\n\r\n\r\nplt.scatter(errorPts[:, 0],\r\n errorPts[:, 1],\r\n c='orange',\r\n marker='o')\r\n\r\nplt.xlabel('first axis (x0)', size=legend_fontsize)\r\nplt.ylabel('second axis (x1)', size=legend_fontsize)\r\nplt.legend(['LDA line',\r\n 'original data - class 0',\r\n 'original data - class 1',\r\n 'LDA error samples'],\r\n fontsize=legend_fontsize)\r\n\r\nplt.savefig('sklearn-improved-img-2.png', dpi=300)\r\nplt.grid(True)\r\nplt.savefig('sklearn-improved-img-2-grid.png', dpi=300)\r\nplt.show()\r\n\r\n\r\n", "sub_path": "sklearn_version_improved_Mo_Zamanian.py", "file_name": "sklearn_version_improved_Mo_Zamanian.py", "file_ext": "py", "file_size_in_byte": 5703, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": 
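# --- Editor's note: the script in this record builds the two-class LDA rule in
# --- closed form: w = Sigma^{-1} (mu0 - mu1) and a constant term
# --- c = log(pi0 / pi1) - 0.5 * (mu0 + mu1)^T w, classifying by sign(w.x + c).
# --- A tiny self-contained check of that algebra on synthetic Gaussians
# --- (all names below are illustrative, not part of the original script):
import numpy as np

rng = np.random.default_rng(0)
mu0, mu1 = np.array([0.0, 0.0]), np.array([2.0, 2.0])
X = np.vstack([rng.normal(mu0, 1.0, (200, 2)), rng.normal(mu1, 1.0, (200, 2))])
y = np.r_[np.zeros(200), np.ones(200)]

Sigma = np.cov(X.T)                          # shared covariance estimate
w = np.linalg.inv(Sigma) @ (mu0 - mu1)
c = np.log(0.5 / 0.5) - 0.5 * (mu0 + mu1) @ w
pred = np.where(X @ w + c > 0, 0, 1)         # positive side of the boundary -> class 0
print("accuracy:", np.mean(pred == y))       # high for these well-separated classes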
[{"api_name": "numpy.loadtxt", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.sign", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.xlabel", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}]} {"seq_id": "274895929", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.9-x86_64/egg/dreegdl/smote.py\n# Compiled at: 2020-03-04 18:08:43\n# Size of source mod 2**32: 2485 bytes\nfrom sklearn.neighbors import NearestNeighbors\nimport random as rd\n\nclass Smote:\n __doc__ = '\\n Implement SMOTE, synthetic minority oversampling technique.\\n Parameters\\n -----------\\n sample 2D (numpy)array\\n minority class samples\\n N Integer\\n amount of SMOTE N%\\n k Integer\\n number of nearest neighbors k\\n k <= number of minority class samples\\n Attributes\\n ----------\\n newIndex Integer\\n keep a count of number of synthetic samples\\n initialize as 0\\n synthetic 2D array\\n array for synthetic samples\\n neighbors K-Nearest Neighbors model\\n '\n\n def __init__(self, sample, N, k):\n self.sample = sample\n self.k = k\n self.T = len(self.sample)\n self.N = N\n self.newIndex = 0\n self.synthetic = []\n self.neighbors = NearestNeighbors(n_neighbors=(self.k)).fit(self.sample)\n\n def over_sampling(self):\n if self.N < 100:\n self.T = self.N / 100 * self.T\n self.N = 100\n self.N = int(self.N / 100)\n for i in range(0, self.T):\n nn_array = self.compute_k_nearest(i)\n self.populate(self.N, i, nn_array)\n\n def compute_k_nearest(self, i):\n nn_array = self.neighbors.kneighbors([self.sample[i]], (self.k), return_distance=False)\n if len(nn_array) is 1:\n return nn_array[0]\n return []\n\n def populate(self, N, i, nn_array):\n while N is not 0:\n nn = rd.randint(1, self.k - 1)\n self.synthetic.append([])\n for attr in range(0, len(self.sample[i])):\n dif = self.sample[nn_array[nn]][attr] - self.sample[i][attr]\n gap = rd.random()\n while gap == 0:\n gap = rd.random()\n\n self.synthetic[self.newIndex].append(self.sample[i][attr] + gap * dif)\n\n self.newIndex += 1\n N -= 1", "sub_path": "pycfiles/dreegdl-1.0.0-py3.7/smote.cpython-37.py", "file_name": "smote.cpython-37.py", "file_ext": "py", "file_size_in_byte": 2273, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "sklearn.neighbors.NearestNeighbors", "line_number": 21, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 40, "usage_type": "call"}, {"api_name": "random.random", "line_number": 44, "usage_type": "call"}, 
{"api_name": "random.random", "line_number": 46, "usage_type": "call"}]} {"seq_id": "651400402", "text": "from django.core.exceptions import ObjectDoesNotExist, PermissionDenied\nfrom django.shortcuts import get_object_or_404\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ValidationError\n\nfrom ..assessment.api import (\n AssessmentEditViewset,\n AssessmentLevelPermissions,\n AssessmentViewset,\n DisabledPagination,\n InAssessmentFilter,\n get_assessment_id_param,\n)\nfrom ..assessment.models import Assessment, TimeSpentEditing\nfrom ..common.api import CleanupFieldsBaseViewSet, LegacyAssessmentAdapterMixin\nfrom ..common.helper import re_digits, tryParseInt\nfrom ..common.renderers import PandasRenderers\nfrom ..common.serializers import UnusedSerializer\nfrom ..common.views import AssessmentPermissionsMixin\nfrom ..mgmt.models import Task\nfrom ..riskofbias import exports\nfrom ..study.models import Study\nfrom . import models, serializers\n\n\nclass RiskOfBiasAssessmentViewset(\n AssessmentPermissionsMixin, LegacyAssessmentAdapterMixin, viewsets.GenericViewSet\n):\n parent_model = Assessment\n model = Study\n permission_classes = (AssessmentLevelPermissions,)\n serializer_class = UnusedSerializer\n lookup_value_regex = re_digits\n\n def get_queryset(self):\n\n perms = self.get_obj_perms()\n if not perms[\"edit\"]:\n return self.model.objects.published(self.assessment)\n return self.model.objects.get_qs(self.assessment.id)\n\n @action(detail=True, methods=(\"get\",), url_path=\"export\", renderer_classes=PandasRenderers)\n def export(self, request, pk):\n self.set_legacy_attr(pk)\n self.permission_check_user_can_view()\n rob_name = self.assessment.get_rob_name_display().lower()\n exporter = exports.RiskOfBiasFlat(\n self.get_queryset(), filename=f\"{self.assessment}-{rob_name}\"\n )\n\n return Response(exporter.build_export())\n\n @action(detail=True, methods=(\"get\",), url_path=\"full-export\", renderer_classes=PandasRenderers)\n def full_export(self, request, pk):\n self.set_legacy_attr(pk)\n self.permission_check_user_can_view()\n rob_name = self.assessment.get_rob_name_display().lower()\n exporter = exports.RiskOfBiasCompleteFlat(\n self.get_queryset(), filename=f\"{self.assessment}-{rob_name}-complete\"\n )\n return Response(exporter.build_export())\n\n\nclass RiskOfBiasDomain(viewsets.ReadOnlyModelViewSet):\n assessment_filter_args = \"assessment\"\n model = models.RiskOfBiasDomain\n pagination_class = DisabledPagination\n permission_classes = (AssessmentLevelPermissions,)\n filter_backends = (InAssessmentFilter, DjangoFilterBackend)\n serializer_class = serializers.AssessmentDomainSerializer\n lookup_value_regex = re_digits\n\n def get_queryset(self):\n return self.model.objects.all().prefetch_related(\"metrics\")\n\n\nclass RiskOfBias(viewsets.ModelViewSet):\n assessment_filter_args = \"study__assessment\"\n model = models.RiskOfBias\n pagination_class = DisabledPagination\n permission_classes = (AssessmentLevelPermissions,)\n filter_backends = (InAssessmentFilter, DjangoFilterBackend)\n serializer_class = serializers.RiskOfBiasSerializer\n lookup_value_regex = re_digits\n\n def get_queryset(self):\n return self.model.objects.all().prefetch_related(\n \"study\", \"author\", \"scores__metric__domain\"\n )\n\n def perform_update(self, serializer):\n super().perform_update(serializer)\n 
study = serializer.instance.study\n user = self.request.user\n Task.objects.ensure_rob_started(study, user)\n if serializer.instance.final and serializer.instance.is_complete:\n Task.objects.ensure_rob_stopped(study)\n\n # send time complete task\n if not serializer.errors:\n TimeSpentEditing.add_time_spent_job(\n self.request.session.session_key,\n serializer.instance.get_edit_url(),\n serializer.instance,\n serializer.instance.get_assessment().id,\n )\n\n def create(self, request, *args, **kwargs):\n study_id = tryParseInt(request.data.get(\"study_id\"), -1)\n\n try:\n study = Study.objects.get(id=study_id)\n except ObjectDoesNotExist:\n raise ValidationError(\"Invalid study_id\")\n\n # permission check using the user submitting the request\n if not study.user_can_edit_study(study.assessment, request.user):\n raise PermissionDenied(\n f\"Submitter '{request.user}' has invalid permissions to edit Risk of Bias for this study\"\n )\n\n # overridden_objects is not marked as optional in RiskOfBiasScoreSerializerSlim; if it's not present\n # in the payload, let's just add an empty array.\n scores = request.data.get(\"scores\")\n for score in scores:\n if \"overridden_objects\" not in score:\n score[\"overridden_objects\"] = []\n\n return super().create(request, *args, **kwargs)\n\n @action(detail=True, methods=[\"get\"])\n def override_options(self, request, pk=None):\n object_ = self.get_object()\n return Response(object_.get_override_options())\n\n\nclass AssessmentMetricViewset(AssessmentViewset):\n model = models.RiskOfBiasMetric\n serializer_class = serializers.AssessmentMetricChoiceSerializer\n pagination_class = DisabledPagination\n assessment_filter_args = \"domain__assessment\"\n\n def get_queryset(self):\n return self.model.objects.all()\n\n\nclass AssessmentMetricScoreViewset(AssessmentViewset):\n model = models.RiskOfBiasMetric\n serializer_class = serializers.AssessmentMetricScoreSerializer\n pagination_class = DisabledPagination\n assessment_filter_args = \"domain__assessment\"\n\n def get_queryset(self):\n return self.model.objects.all()\n\n\nclass AssessmentScoreViewset(AssessmentEditViewset):\n model = models.RiskOfBiasScore\n pagination_class = DisabledPagination\n assessment_filter_args = \"metric__domain_assessment\"\n serializer_class = serializers.RiskOfBiasScoreSerializer\n\n def get_assessment(self, request, *args, **kwargs):\n assessment_id = get_assessment_id_param(request)\n return get_object_or_404(self.parent_model, pk=assessment_id)\n\n @action(detail=False)\n def choices(self, request):\n assessment_id = self.get_assessment(request)\n rob_assessment = models.RiskOfBiasAssessment.objects.get(assessment_id=assessment_id)\n return Response(rob_assessment.get_rob_response_values())\n\n def create(self, request, *args, **kwargs):\n # create using one serializer; return using a different one\n serializer = serializers.RiskOfBiasScoreOverrideCreateSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n new_serializer = serializers.RiskOfBiasScoreSerializer(serializer.instance)\n headers = self.get_success_headers(new_serializer.data)\n return Response(new_serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n def perform_destroy(self, instance):\n if instance.is_default:\n raise PermissionDenied(\"Cannot delete a default risk of bias score\")\n instance.delete()\n\n\nclass ScoreCleanupViewset(CleanupFieldsBaseViewSet):\n model = models.RiskOfBiasScore\n serializer_class = 
serializers.RiskOfBiasScoreCleanupSerializer\n assessment_filter_args = \"metric__domain__assessment\"\n", "sub_path": "hawc/apps/riskofbias/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 7609, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "common.views.AssessmentPermissionsMixin", "line_number": 30, "usage_type": "name"}, {"api_name": "common.api.LegacyAssessmentAdapterMixin", "line_number": 30, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 30, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 30, "usage_type": "name"}, {"api_name": "assessment.models.Assessment", "line_number": 32, "usage_type": "name"}, {"api_name": "study.models.Study", "line_number": 33, "usage_type": "name"}, {"api_name": "assessment.api.AssessmentLevelPermissions", "line_number": 34, "usage_type": "name"}, {"api_name": "common.serializers.UnusedSerializer", "line_number": 35, "usage_type": "name"}, {"api_name": "common.helper.re_digits", "line_number": 36, "usage_type": "name"}, {"api_name": "riskofbias.exports.RiskOfBiasFlat", "line_number": 50, "usage_type": "call"}, {"api_name": "riskofbias.exports", "line_number": 50, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 54, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 45, "usage_type": "call"}, {"api_name": "common.renderers.PandasRenderers", "line_number": 45, "usage_type": "name"}, {"api_name": "riskofbias.exports.RiskOfBiasCompleteFlat", "line_number": 61, "usage_type": "call"}, {"api_name": "riskofbias.exports", "line_number": 61, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 64, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 56, "usage_type": "call"}, {"api_name": "common.renderers.PandasRenderers", "line_number": 56, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 67, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 67, "usage_type": "name"}, {"api_name": "assessment.api.DisabledPagination", "line_number": 70, "usage_type": "name"}, {"api_name": "assessment.api.AssessmentLevelPermissions", "line_number": 71, "usage_type": "name"}, {"api_name": "assessment.api.InAssessmentFilter", "line_number": 72, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.DjangoFilterBackend", "line_number": 72, "usage_type": "name"}, {"api_name": "common.helper.re_digits", "line_number": 74, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 80, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 80, "usage_type": "name"}, {"api_name": "assessment.api.DisabledPagination", "line_number": 83, "usage_type": "name"}, {"api_name": "assessment.api.AssessmentLevelPermissions", "line_number": 84, "usage_type": "name"}, {"api_name": "assessment.api.InAssessmentFilter", "line_number": 85, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.DjangoFilterBackend", "line_number": 85, "usage_type": "name"}, {"api_name": "common.helper.re_digits", "line_number": 87, "usage_type": "name"}, {"api_name": "study.models", "line_number": 96, "usage_type": "name"}, {"api_name": "mgmt.models.Task.objects.ensure_rob_started", "line_number": 98, "usage_type": "call"}, {"api_name": 
"study.models", "line_number": 98, "usage_type": "argument"}, {"api_name": "mgmt.models.Task.objects", "line_number": 98, "usage_type": "attribute"}, {"api_name": "mgmt.models.Task", "line_number": 98, "usage_type": "name"}, {"api_name": "mgmt.models.Task.objects.ensure_rob_stopped", "line_number": 100, "usage_type": "call"}, {"api_name": "study.models", "line_number": 100, "usage_type": "argument"}, {"api_name": "mgmt.models.Task.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "mgmt.models.Task", "line_number": 100, "usage_type": "name"}, {"api_name": "assessment.models.TimeSpentEditing.add_time_spent_job", "line_number": 104, "usage_type": "call"}, {"api_name": "assessment.models.TimeSpentEditing", "line_number": 104, "usage_type": "name"}, {"api_name": "common.helper.tryParseInt", "line_number": 112, "usage_type": "call"}, {"api_name": "study.models", "line_number": 115, "usage_type": "name"}, {"api_name": "study.models.Study.objects.get", "line_number": 115, "usage_type": "call"}, {"api_name": "study.models.Study.objects", "line_number": 115, "usage_type": "attribute"}, {"api_name": "study.models.Study", "line_number": 115, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 116, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ValidationError", "line_number": 117, "usage_type": "call"}, {"api_name": "study.models.user_can_edit_study", "line_number": 120, "usage_type": "call"}, {"api_name": "study.models", "line_number": 120, "usage_type": "name"}, {"api_name": "study.models.assessment", "line_number": 120, "usage_type": "attribute"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 121, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 137, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 134, "usage_type": "call"}, {"api_name": "assessment.api.AssessmentViewset", "line_number": 140, "usage_type": "name"}, {"api_name": "assessment.api.DisabledPagination", "line_number": 143, "usage_type": "name"}, {"api_name": "assessment.api.AssessmentViewset", "line_number": 150, "usage_type": "name"}, {"api_name": "assessment.api.DisabledPagination", "line_number": 153, "usage_type": "name"}, {"api_name": "assessment.api.AssessmentEditViewset", "line_number": 160, "usage_type": "name"}, {"api_name": "assessment.api.DisabledPagination", "line_number": 162, "usage_type": "name"}, {"api_name": "assessment.api.get_assessment_id_param", "line_number": 167, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 168, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 174, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 170, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 183, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 183, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 183, "usage_type": "name"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 187, "usage_type": "call"}, {"api_name": "common.api.CleanupFieldsBaseViewSet", "line_number": 191, "usage_type": "name"}]} {"seq_id": "7116017", "text": "# added aggressive colony, strength of Blobs as indicator of who gets to eat\n# when two agro colonies meet, only stronger one gets to eat\n# swapped 'if len(list) > 0' to 'if not 
list'\n# agroBlobs now spawn with a slightly mutated strength\n# adjusted the scoreboard to show average strength of the colony\n\nimport pygame, numpy, random\n\n# Define some colors\nBLACK = ( 0, 0, 0)\nWHITE = ( 255, 255, 255)\nRED = ( 255, 0, 0)\nGREEN = ( 0, 255, 0)\nBLUE = ( 0, 0, 255)\nPURPLE = ( 128, 0, 128)\n# Set window parameters\npygame.init()\nsize = width, height = 680, 680\npixel = 5\npixWidth, pixHeight = width//pixel, height//pixel  # integer division so the grid dimensions stay ints for range() and randrange()\n
pygame.display.set_caption(\"Game of Life\")\nscreen = pygame.display.set_mode(size)\nkeys_down = set()\nmyfont = pygame.font.SysFont(\"monospace\", 25)\n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n \n# Lay seed for random functions\nrandom.seed()\n\n# Classes\n\nclass Blob:\n
    \"\"\"Blob is a green or red block that moves around, repopulates, eats and dies\"\"\"\n    \n    def __init__(self, s, l, a=0):\n        self.strength = s\n        self.life = l\n        self.age = a\n    \n    def get_strength(self):  # renamed so the method is not shadowed by the strength attribute\n        return self.strength\n    \nclass TheMap:\n    \"\"\"Map stores screen size as well as 3D array of Blobs, also includes functions for interaction of Blobs\"\"\"\n    \n
    def __init__(self,pixWidth,pixHeight,pixel):\n        \"\"\"Initialises the map of given width and height with 1000 Blobs of each species\"\"\"\n        \n        self.pixWidth = pixWidth\n        self.pixHeight = pixHeight\n        self.pixel = pixel\n        self.turn = 0\n        \n        \n        self.count = [ 0 for i in range(nColonies) ]\n        self.avgStr = [ 0 for i in range(nColonies) ]\n        \n
        self.array = [ [ [ [] for z in range(nColonies) ] for y in range(pixHeight) ] for x in range(pixWidth) ]\n        for i in range(nColonies):\n            for n in range(1000):\n                x = random.randrange(0,pixWidth)\n                y = random.randrange(0,pixHeight)\n                \n                s = numpy.random.normal(50,1)  # strength set as a normal distribution\n                l = numpy.random.normal(life,1)  # life span altered by plus/minus normal distribution\n                \n                self.array[x][y][i].append(Blob(s,l))\n    \n    \n
    def moveBlobs(self):\n        \"\"\"Moves blobs randomly within provided screen size, also counts each colony size\"\"\"\n        \n        # --- Create an array to store next turn\n        tmp_array = [ [ [ [] for z in range(nColonies) ] for y in range(pixHeight) ] for x in range(pixWidth) ]\n        \n        # --- Set count to 0\n        for i in range(nColonies):\n            self.count[i] = 0\n            self.avgStr[i] = 0\n        \n
        # --- Go through each pixel of each layer of Map\n        for i in range(nColonies):\n            for x in range(self.pixWidth):\n                for y in range(self.pixHeight):\n                    \n                    while self.array[x][y][i]:\n                        self.count[i] += 1  # count the blob that is being moved\n                        self.avgStr[i] += self.array[x][y][i][-1].strength\n                        \n                        xx = x + random.choice([-1,0,1])\n                        yy = y + random.choice([-1,0,1])\n                        \n
                        if xx >= self.pixWidth:\n                            xx = 0\n                        elif xx < 0:\n                            xx = self.pixWidth - 1\n                        if yy >= self.pixHeight:\n                            yy = 0\n                        elif yy < 0:\n                            yy = self.pixHeight - 1\n                        \n                        if len(tmp_array[xx][yy][i]) < 5:\n                            tmp_array[xx][yy][i].append(self.array[x][y][i].pop())\n                        else:\n                            break\n            \n            if self.count[i]: self.avgStr[i] /= self.count[i]  # skip extinct colonies to avoid division by zero\n        self.array = tmp_array\n    \n
    def actBlobs(self):\n        \"\"\"Checks if aggressive blobs ate something and kills the old ones \"\"\"\n        \n        # --- Create an array to store next turn\n        tmp_array = [ [ [ [] for z in range(nColonies) ] for y in range(pixHeight) ] for x in range(pixWidth) ]\n        \n        # --- Go through each pixel in Map\n        for x in range(self.pixWidth):\n            for y in range(self.pixHeight):\n                \n                # --- Remove the old aggressive Blobs\n                for i in range(1,nColonies):\n                    for dying in list(self.array[x][y][i]):  # iterate over a copy so removing while looping does not skip blobs\n                        dying.age += 1\n                        if dying.age > life:\n                            self.array[x][y][i].remove(dying)\n                \n
                # --- Resolve conflicts between aggressive Blobs\n                winner = 2\n                if self.array[x][y][1]:\n                    winner = 1\n                    if self.array[x][y][2]:\n                        strength = [0,0]\n                        for i in range(nColonies-1):\n                            for blob in self.array[x][y][i+1]:\n                                strength[i] += blob.strength\n                            strength[i] /= len(self.array[x][y][i+1])\n                        if strength[1] == strength[0]:\n                            winner = random.choice([1,2])\n                        elif strength[1] > strength[0]:\n                            winner = 2\n                \n
                # --- Sort the agroBlobs by strength (increasing)\n                for i in range(1,nColonies):\n                    self.array[x][y][i].sort(key=lambda Blob: Blob.strength)  # sorts list with strength as a key\n                \n                # --- Let the winner eat green Blobs\n                while self.array[x][y][winner] and self.array[x][y][0]:\n                    self.array[x][y][0].pop()\n                    s = self.array[x][y][winner][-1].strength + numpy.random.normal(0,2)\n                    tmp_array[x][y][winner].append(self.array[x][y][winner].pop())\n                    \n                    xx = x + random.choice([-1,0,1])\n                    yy = y + random.choice([-1,0,1])\n                    \n
                    if xx >= self.pixWidth:\n                        xx = 0\n                    elif xx < 0:\n                        xx = self.pixWidth - 1\n                    if yy >= self.pixHeight:\n                        yy = 0\n                    elif yy < 0:\n                        yy = self.pixHeight - 1\n                    \n                    if len(tmp_array[xx][yy][winner]) < 5:\n                        tmp_array[xx][yy][winner].append(Blob(s,life))\n                    else:\n                        break\n                \n
                # --- Surviving Greens get a chance to repopulate\n                while self.array[x][y][0]:\n                    \n                    if random.random() < span_chance: \n                        xx = x + random.choice([-1,0,1])\n                        yy = y + random.choice([-1,0,1])\n                        \n                        if xx >= self.pixWidth:\n                            xx = 0\n                        elif xx < 0:\n                            xx = self.pixWidth - 1\n                        if yy >= self.pixHeight:\n                            yy = 0\n                        elif yy < 0:\n                            yy = self.pixHeight - 1\n                        \n                        if len(tmp_array[xx][yy][0]) < 5: \n                            tmp_array[xx][yy][0].append(self.array[x][y][0][-1])\n                        else:\n                            break\n                    \n
                    if len(tmp_array[x][y][0]) < 5: \n                        tmp_array[x][y][0].append(self.array[x][y][0].pop())\n                    else:\n                        break \n                \n                # --- Transfer the remaining agroBlobs to new array\n                for i in range(1,nColonies): \n                    while self.array[x][y][i]:\n                        if len(tmp_array[x][y][i]) < 5:\n                            tmp_array[x][y][i].append(self.array[x][y][i].pop())\n                        else:\n                            break\n        \n        # --- Update the array\n        self.array = tmp_array\n        self.turn += 1\n    \n
    def draw(self,screen):\n        \"\"\"Draws the map with blobs\"\"\"\n        \n        for x in range(self.pixWidth):\n            for y in range(self.pixHeight):\n                xy = [x*self.pixel,y*self.pixel,self.pixel,self.pixel]\n                \n                if self.array[x][y][1]:\n                    pygame.draw.rect(screen,RED,xy)\n                    if self.array[x][y][2]:\n                        pygame.draw.polygon(screen,PURPLE,[[x*self.pixel,y*self.pixel],[(x+1)*self.pixel,y*self.pixel],[x*self.pixel,(y+1)*self.pixel]])\n                elif self.array[x][y][2]:\n                    pygame.draw.rect(screen,PURPLE,xy)\n                elif self.array[x][y][0]:\n                    pygame.draw.rect(screen,GREEN,xy)\n        \n
        pygame.draw.rect(screen,BLACK,[0,0,200,85])\n        pygame.draw.rect(screen,WHITE,[0,0,200,85],2)\n        \n        label = myfont.render('Size', 1, WHITE)\n        screen.blit(label, (3, 1))\n        label = myfont.render(str(self.count[0]), 1, GREEN)\n        screen.blit(label, (3, 21))\n        label = myfont.render(str(self.count[1]), 1, RED)\n        screen.blit(label, (3, 41))\n        label = myfont.render(str(self.count[2]), 1, PURPLE)\n        screen.blit(label, (3, 61))\n        \n
        label = myfont.render('avgStr', 1, WHITE)\n        screen.blit(label, (80, 1))\n        #label = myfont.render(str(self.avgStr[0]), 1, GREEN)\n        #screen.blit(label, (80, 21))\n        label = myfont.render(\"{:.3f}\".format(self.avgStr[1]), 1, RED)\n        screen.blit(label, (80, 41))\n        label = myfont.render(\"{:.3f}\".format(self.avgStr[2]), 1, PURPLE)\n        screen.blit(label, (80, 61))\n# Functions\n\n# Variables\nnColonies = 3\n\nlife = 12\t\t # how many turns it takes for aggressive blob to die\nspan_chance = 0.20\t\t# percentage chance of green blob spawning offspring\n\ndone = 
False\nRUNNING, PAUSE = 0,1\nstate = RUNNING\n\n# Pre-start set up\ntheMap = TheMap(pixWidth,pixHeight,pixel) # initalises theMap with blobs\ntheMap.draw(screen)\n\n# -------- Main Program Loop -----------\nwhile not done:\n \n # --- Event loop (not yet fully useful)\n \n for event in pygame.event.get(): # did user do something\n if event.type == pygame.KEYDOWN: # if user pushed any key\n if event.key == pygame.K_SPACE: state = not state\n if event.key == pygame.K_ESCAPE:\n done = True\n state = PAUSE\n \n if state == RUNNING:\n # --- Game logic\n theMap.moveBlobs() # moves the Blobs around in the array\n \n theMap.actBlobs() # aggressive Blobs eat Green ones, they all reproduce\n \n # --- Clear the screen\n screen.fill(BLACK)\n \n # --- Drawing new screen\n theMap.draw(screen)\n \n # --- Update screen with what was drawn\n pygame.display.flip()\n \n # --- Limits frames per second\n clock.tick(10)\n", "sub_path": "gameoflife/map11.py", "file_name": "map11.py", "file_ext": "py", "file_size_in_byte": 11637, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pygame.init", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 26, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 29, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 62, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 66, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 91, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 92, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 150, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 153, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 154, "usage_type": "call"}, {"api_name": "random.random", "line_number": 173, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 174, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 175, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 216, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 216, "usage_type": "attribute"}, {"api_name": "pygame.draw.polygon", "line_number": 218, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 218, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 220, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 220, "usage_type": 
"attribute"}, {"api_name": "pygame.draw.rect", "line_number": 222, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 222, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 224, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 224, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 225, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 225, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 265, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 265, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 266, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 267, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 268, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 285, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 285, "usage_type": "attribute"}]} {"seq_id": "159852368", "text": "import pygame\nimport numpy as np\nimport random\nfrom Tile import Tile\nfrom global_const import *\n\nRESIZABLE_FLAGS = pygame.RESIZABLE | pygame.HWSURFACE | pygame.DOUBLEBUF\nFULLSCREEN_FLAGS = pygame.FULLSCREEN | pygame.HWSURFACE | pygame.DOUBLEBUF\n\n\nclass Game:\n def __init__(self):\n self.play = True\n self.fullscreen = False\n self.clock = pygame.time.Clock()\n self.screen = pygame.display.set_mode(RESOLUTION, RESIZABLE_FLAGS)\n\n # Needed for tiles background\n self.tiles_background = [[Tile() for _ in range(BOARD_SIZE)] \n for _ in range(BOARD_SIZE)]\n self.board = np.array([[Tile() for _ in range(BOARD_SIZE)] \n for _ in range(BOARD_SIZE)])\n self.board_random(2)\n\n def run(self):\n \"\"\"Main game loop\"\"\"\n while self.play:\n self.catch_events()\n self.draw()\n self.update()\n\n def catch_events(self):\n \"\"\"Catch key events\"\"\"\n for event in pygame.event.get():\n # Quit\n if event.type == pygame.QUIT:\n self.play = False\n\n # Video resize\n if event.type == pygame.VIDEORESIZE:\n if not self.fullscreen:\n new_width = event.w if event.w >= MIN_WIDTH else \\\n MIN_WIDTH\n new_height = event.h if event.h >= MIN_HEIGHT else \\\n MIN_HEIGHT\n self.screen = pygame.display.set_mode(\n (new_width, new_height), \n RESIZABLE_FLAGS)\n self.update_tiles_position(\n width=new_width, \n height=new_height)\n\n # KeyDown events\n if event.type == pygame.KEYDOWN:\n # Check if alt is pressed\n pressed = pygame.key.get_pressed()\n alt_held = pressed[pygame.K_LALT] or pressed[pygame.K_RALT]\n # Quit on alt + F4\n if event.key == pygame.K_F4 and alt_held:\n self.play = False\n\n # Fullscreen toggle\n if event.key == pygame.K_f:\n self.fullscreen = not self.fullscreen\n if self.fullscreen:\n self.screen = pygame.display.set_mode(\n MONITOR_RESOLUTION, \n FULLSCREEN_FLAGS)\n else:\n self.screen = pygame.display.set_mode(\n RESOLUTION, \n RESIZABLE_FLAGS)\n self.update_tiles_position()\n\n # Move event\n if event.key in [pygame.K_UP, pygame.K_DOWN, \n pygame.K_LEFT, pygame.K_RIGHT]:\n self.move(event.key)\n self.board_random()\n\n def move(self, direction):\n '''Movement\n \n Args:\n direction (pygame.KEY): Pressed key (K_UP, K_DOWN, K_LEFT, K_RIGHT)\n '''\n if direction in (pygame.K_UP, pygame.K_DOWN):\n self.board = self.board.T\n if direction in (pygame.K_RIGHT, pygame.K_DOWN):\n self.board = self.board[:,::-1]\n\n for i in range(BOARD_SIZE):\n for j in range(BOARD_SIZE - 1):\n for k in range(j + 1, BOARD_SIZE):\n if self.board[i][k].value == 0:\n 
continue\n if self.board[i][j].value != 0 and \\\n self.board[i][j].value != self.board[i][k].value:\n break\n if self.board[i][j].value == 0 or \\\n self.board[i][j].value == self.board[i][k].value:\n self.board[i][j].value += self.board[i][k].value\n self.board[i][k].value = 0\n\n\n if direction in (pygame.K_RIGHT, pygame.K_DOWN):\n self.board = self.board[:,::-1]\n if direction in (pygame.K_UP, pygame.K_DOWN):\n self.board = self.board.T\n\n def draw(self):\n \"\"\"Draws surface\"\"\"\n self.screen.fill(COLORS['BACKGROUND'])\n # Draw tiles\n [[col.draw(self.screen) for col in row] \n for row in self.board]\n\n def update_tiles_position(self, width=0, height=0):\n \"\"\"Update tiles position\n \n Args:\n width (int, optional, default=0): Surface width\n height (int, optional, default=0): Surface height\n \"\"\"\n [[col.set_pos(self.fullscreen, width, height) for col in row] \n for row in self.board]\n\n def board_random(self, amount=1):\n '''Put value '2' in random place in self.board\n \n Args:\n amount (int, optional): amount of values to place\n \n Returns:\n bool: return true if success else false\n '''\n for _ in range(amount):\n if self.check_if_lose():\n self.play = False\n return False\n while True:\n row = random.randint(0, BOARD_SIZE - 1)\n col = random.randint(0, BOARD_SIZE - 1)\n if self.board[row][col].value != 0:\n continue\n self.board[row][col].value = 2\n break\n return True\n\n def check_if_lose(self):\n \"\"\"Checks if a board is full\n \n Returns:\n BOOL: Return True if board is full else False\n \"\"\"\n for row in self.board:\n for col in row:\n if not col.value:\n return False\n return True\n\n def update(self):\n \"\"\"Code which update screen\"\"\"\n pygame.display.flip()\n self.clock.tick(FPS)\n", "sub_path": "v2.0/Game.py", "file_name": "Game.py", "file_ext": "py", "file_size_in_byte": 5796, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pygame.RESIZABLE", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.HWSURFACE", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.DOUBLEBUF", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.FULLSCREEN", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.HWSURFACE", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.DOUBLEBUF", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 16, "usage_type": "attribute"}, {"api_name": "Tile.Tile", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "Tile.Tile", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.VIDEORESIZE", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 
56, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pygame.K_LALT", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.K_RALT", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.K_F4", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pygame.K_f", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 66, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 108, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 142, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 143, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 164, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 164, "usage_type": "attribute"}]} {"seq_id": "630661749", "text": "# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\nfrom django.shortcuts import render, redirect\r\nfrom django.http import JsonResponse, HttpResponseNotFound, HttpResponseNotAllowed, HttpResponse\r\nfrom .models import User, Performance, Vacation, Out, Leave, Staff, Section, Position, Role, Message, Notice, Punch\r\nfrom django.utils import timezone\r\nfrom django.db import transaction\r\nfrom django.contrib.auth.hashers import make_password, check_password\r\nimport datetime\r\n\r\n# Create your views here.\r\n\r\ndef login(request):\r\n '''ๅค„็†็™ปๅฝ•'''\r\n if request.method == 'POST':\r\n username = request.POST.get('username', None)\r\n password = request.POST.get('password', None)\r\n if username and password:\r\n username = username.strip()\r\n try:\r\n user_obj = User.objects.get(user_name=username)\r\n if check_password(password, user_obj.user_passwd):\r\n request.session['user_id'] = user_obj.user_id\r\n staff_name = user_obj.staff.staff_name\r\n notice = Notice.objects.filter(receiver__contains=user_obj.role_id, date__gt=(\r\n datetime.date.today() - datetime.timedelta(days=10)))\r\n message = Message.objects.filter(staff_id=user_obj.staff, date__gt=(\r\n datetime.date.today() - datetime.timedelta(days=3)))\r\n if Role.objects.get(pk=user_obj.role_id).role_name == '่ถ…็บง็ฎก็†ๅ‘˜':\r\n response = render(\r\n request, 'root-homepage.html', {'notice': notice, 'message': message, 'staff_name': staff_name})\r\n return response\r\n elif Role.objects.get(pk=user_obj.role_id).role_name == '้ƒจ้—จไธป็ฎก':\r\n 
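# department managers are routed to the supervisor homepage\r\n                        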
response = render(\r\n request, 'superior-homepage.html', {'notice': notice, 'message': message, 'staff_name': staff_name})\r\n return response\r\n elif Role.objects.get(pk=user_obj.role_id).role_name == 'ๆ™ฎ้€šๅ‘˜ๅทฅ':\r\n return render(request, 'user-homepage.html', {'notice': notice, 'message': message, 'staff_name': staff_name})\r\n else:\r\n return render(request, 'login.html', {'message': 'ๅฏ†็ ้”™่ฏฏ'})\r\n except User.DoesNotExist:\r\n return render(request, 'login.html', {'message': '็”จๆˆทๅไธๅญ˜ๅœจ'})\r\n return render(request, 'login.html')\r\n\r\ndef attendance(request):\r\n '''่€ƒๅ‹ค้กต้ขๆ˜พ็คบ'''\r\n try:\r\n user_id = request.session['user_id']\r\n except KeyError:\r\n return redirect('login')\r\n return render(request, 'attendance.html')\r\n\r\n\r\ndef get_performance(request):\r\n '''่€ƒๅ‹คๆ•ฐๆฎ่Žทๅ–'''\r\n try:\r\n user_id = request.session['user_id']\r\n except KeyError:\r\n return redirect('login')\r\n page = int(request.GET['page'])\r\n rows = int(request.GET['rows'])\r\n # ๅˆคๆ–ญไธป็ฎกๆŸฅ่ฏข่ฟ˜ๆ˜ฏ่‡ชๅทฑๆŸฅ่ฏข\r\n try:\r\n user_name = request.GET['user_name']\r\n # ไธป็ฎกๆŸฅ่ฏข\r\n user_obj = User.objects.get(user_name=user_name)\r\n except:\r\n # ่‡ชๅทฑๆŸฅ่ฏข\r\n try:\r\n user_obj = User.objects.get(pk=user_id)\r\n except User.DoesNotExist:\r\n return render(request, 'login.html', {'message': '็”จๆˆทไฟกๆฏ่ฟ‡ๆœŸ๏ผŒ่ฏท้‡ๆ–ฐ็™ปๅฝ•'})\r\n performance_info = Performance.objects.filter(\r\n staff_id=user_obj.staff.staff_id).order_by('salaryMonth').values('salaryMonth', 'onday', 'lateday', 'offday', 'addday', 'outday')\r\n for i in performance_info:\r\n i['user_name'] = user_obj.user_name\r\n i['staff_name'] = user_obj.staff.staff_name\r\n i['salaryMonth'] = '{}-{}'.format(i['salaryMonth'].year,\r\n i['salaryMonth'].month)\r\n total = len(performance_info)\r\n res = {\r\n \"total\": total,\r\n \"rows\": performance_info[((page - 1) * rows):(page * rows)]\r\n }\r\n return JsonResponse(res)\r\n\r\n\r\ndef vacation(request):\r\n '''ๅ‡ๆœŸ่Žทๅ–'''\r\n try:\r\n user_id = request.session['user_id']\r\n except KeyError:\r\n return redirect('login')\r\n userObj = User.objects.get(pk=user_id)\r\n vacationObj = Vacation.objects.get(staff_id=userObj.staff.staff_id)\r\n vacation_info = [\r\n {\r\n 'field': 'ๅทฅๅท',\r\n 'value': userObj.user_name,\r\n },\r\n {\r\n 'field': 'ๅง“ๅ',\r\n 'value': userObj.staff.staff_name,\r\n },\r\n {\r\n 'field': 'ๅนดๅ‡',\r\n 'value': vacationObj.yearday,\r\n },\r\n {\r\n 'field': '็—…ๅ‡',\r\n 'value': vacationObj.illday,\r\n },\r\n {\r\n 'field': 'ๅนดไผ‘',\r\n 'value': vacationObj.yearrest,\r\n },\r\n {\r\n 'field': '็—…ไผ‘',\r\n 'value': vacationObj.illrest,\r\n },\r\n {\r\n 'field': 'ๅ‰ฉไฝ™ๅนดๅ‡',\r\n 'value': (int(vacationObj.yearday) - int(vacationObj.yearrest)),\r\n },\r\n {\r\n 'field': 'ๅ‰ฉไฝ™็—…ๅ‡',\r\n 'value': (int(vacationObj.illday) - int(vacationObj.illrest)),\r\n },\r\n ]\r\n return render(request, 'vacation.html', {'vacation_info': vacation_info})\r\n\r\n\r\ndef punch(request):\r\n '''ๆ‰“ๅกๅค„็†'''\r\n try:\r\n user_id = request.session['user_id']\r\n except KeyError:\r\n return redirect('login')\r\n staff = User.objects.get(pk=user_id).staff\r\n try:\r\n Punch.objects.get(staff=staff, date__date=datetime.date.today())\r\n return HttpResponse('ไปŠๅคฉๅทฒ็ปๆ‰“ๅกไบ†')\r\n except Punch.DoesNotExist:\r\n latetime = 9\r\n performance = Performance.objects.get(staff=staff, salaryMonth__year=timezone.now(\r\n ).year, salaryMonth__month=timezone.now().month)\r\n if timezone.now().hour > latetime:\r\n 
Punch.objects.create(staff=staff, date=timezone.now(), late=True)\r\n performance.lateday += 1\r\n else:\r\n Punch.objects.create(staff=staff, date=timezone.now(), late=False)\r\n performance.onday += 1\r\n performance.save()\r\n return HttpResponse('ๆ‰“ๅกๆˆๅŠŸ')\r\n return HttpResponseNotFound('404')\r\n\r\n\r\ndef salary_show(request):\r\n try:\r\n user_id = request.session['user_id']\r\n except KeyError:\r\n return redirect('login')\r\n return render(request, 'salary.html')\r\n\r\n\r\ndef salary(request):\r\n '''ๅทฅ่ต„่Žทๅ–'''\r\n try:\r\n user_id = request.session['user_id']\r\n except KeyError:\r\n return redirect('login')\r\n page = int(request.GET['page'])\r\n rows = int(request.GET['rows'])\r\n user_obj = User.objects.get(pk=user_id)\r\n vacation_obj = Vacation.objects.get(staff_id=user_obj.staff.staff_id)\r\n performance_info = Performance.objects.filter(staff_id=user_obj.staff.staff_id).order_by(\r\n 'salaryMonth').values('onday', 'basepay', 'lateday', 'offday', 'addday', 'outday', 'salaryMonth')\r\n reslist = []\r\n total = len(performance_info)\r\n for i in performance_info:\r\n day_pay = i['basepay'] // 22\r\n overtime_pay = int(i['addday'] * day_pay * 1.5)\r\n outday_pay = int(i['outday'] * day_pay * 1.5)\r\n lateday_deduct = int(i['lateday'] * day_pay * 0.2)\r\n offday_deduct = int(i['offday'] * day_pay)\r\n busy_deduct = vacation_obj.busyday * day_pay\r\n if vacation_obj.illday - vacation_obj.illrest < 0:\r\n ill_deduct = (vacation_obj.illrest - vacation_obj.illday) * day_pay\r\n else:\r\n ill_deduct = 0\r\n salary = i['basepay'] + overtime_pay + outday_pay - \\\r\n lateday_deduct - offday_deduct - busy_deduct - ill_deduct\r\n resdict = {\r\n 'salaryMonth': '{}-{}'.format(i['salaryMonth'].year, i['salaryMonth'].month),\r\n 'user_name': user_obj.user_name,\r\n 'staff_name': user_obj.staff.staff_name,\r\n 'onday': i['onday'],\r\n 'basepay': i['basepay'],\r\n 'overtime_pay': overtime_pay,\r\n 'outday_pay': outday_pay,\r\n 'lateday_deduct': lateday_deduct,\r\n 'offday_deduct': offday_deduct,\r\n 'busy_deduct': busy_deduct,\r\n 'ill_deduct': ill_deduct,\r\n 'totalpay': salary\r\n }\r\n reslist.append(resdict)\r\n res = {\r\n \"total\": total,\r\n \"rows\": reslist[((page - 1) * rows):(page * rows)]\r\n }\r\n return JsonResponse(res)\r\n\r\n\r\ndef overtime_show(request):\r\n '''ๅŠ ็ญ็”ณ่ฏทๅฑ•็คบ'''\r\n try:\r\n user_id = request.session['user_id']\r\n except KeyError:\r\n return redirect('login')\r\n return render(request, 'jiaban.html')\r\n\r\n\r\ndef overtime_application(request):\r\n '''ๅŠ ็ญ็”ณ่ฏทๅค„็†'''\r\n try:\r\n user_id = request.session['user_id']\r\n except KeyError:\r\n return redirect('login')\r\n if request.method == 'POST':\r\n startTime = request.POST.get('startTime')\r\n endTime = request.POST.get('endTime')\r\n startTime = datetime.datetime.strptime(startTime, '%Y-%m-%d %H:%M')\r\n endTime = datetime.datetime.strptime(endTime, '%Y-%m-%d %H:%M')\r\n subtime = endTime - startTime\r\n addday = round((subtime.days + subtime.seconds / 86400), 2)\r\n user_obj = User.objects.get(pk=user_id)\r\n performance_obj = Performance.objects.get(\r\n staff_id=user_obj.staff.staff_id, salaryMonth__year=timezone.now().year, salaryMonth__month=timezone.now().month)\r\n performance_obj.addday += addday\r\n performance_obj.save()\r\n return HttpResponse('ๆไบคๆˆๅŠŸ')\r\n\r\n\r\ndef out_show(request):\r\n '''ๅค–ๅ‡บๆ˜พ็คบ'''\r\n try:\r\n user_id = request.session['user_id']\r\n except KeyError:\r\n return redirect('login')\r\n return render(request, 
'waichu.html')\r\n\r\n\r\ndef out_application(request):\r\n    '''ๅค–ๅ‡บ็”ณ่ฏทๅค„็†'''\r\n    try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    if request.method == 'POST':\r\n        destination = request.POST.get('destination')\r\n        reason = request.POST.get('reason')\r\n        startday = request.POST.get('startDay')\r\n        endday = request.POST.get('endDay')\r\n        user_obj = User.objects.get(pk=user_id)\r\n
        if Out.objects.filter(staff_id=user_obj.staff.staff_id, startday__lte=startday, endday__gte=startday, statu__in=['unapproved','pass']):  # only this user's own pending/approved trips should block the request\r\n            return HttpResponse('ๆ‚จๅœจ{}ๅˆฐ{}ๅทฒๆœ‰ๅค–ๅ‡บๅœจ็”ณ่ฏท'.format(startday, endday))\r\n        Out.objects.create(destination=destination, reason=reason,\r\n                           startday=startday, endday=endday,\r\n                           staff_id=user_obj.staff.staff_id,\r\n                           applytime=timezone.now())\r\n        role = Role.objects.get(pk=user_obj.role_id).role_name\r\n
        if role == 'ๆ™ฎ้€šๅ‘˜ๅทฅ':\r\n            boss = Staff.objects.get(\r\n                section__section_id=user_obj.staff.section_id, user__role__role_name='้ƒจ้—จไธป็ฎก')\r\n            Message.objects.create(staff=boss, title='ๅค–ๅ‡บ็”ณ่ฏท', content='ๆ‚จ็š„ๅ‘˜ๅทฅ{}็”ณ่ฏทๅœจ{}ๅˆฐ{}ๅค–ๅ‡บๅˆฐ{}๏ผŒ่ฏทๅค„็†'.format(\r\n                user_obj.staff.staff_name, startday, endday, destination))\r\n        elif role == '้ƒจ้—จไธป็ฎก':\r\n            boss = Staff.objects.filter(user__role__role_name='่ถ…็บง็ฎก็†ๅ‘˜')\r\n            for i in boss:\r\n                Message.objects.create(staff=i, title='ๅค–ๅ‡บ็”ณ่ฏท', content='ๆ‚จ็š„ๅ‘˜ๅทฅ{}็”ณ่ฏทๅœจ{}ๅˆฐ{}ๅค–ๅ‡บๅˆฐ{}๏ผŒ่ฏทๅค„็†'.format(\r\n                    user_obj.staff.staff_name, startday, endday, destination))\r\n        return HttpResponse('็”ณ่ฏทๅทฒๆไบค๏ผŒ็ญ‰ๅพ…ๆ‰นๅค')\r\n    return HttpResponseNotFound('404 not found')\r\n\r\n\r\n
def vacation_apply_show(request):\r\n    '''่ฏทๅ‡็”ณ่ฏทๅฑ•็คบ'''\r\n    try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    return render(request, 'qingjia.html')\r\n\r\n\r\ndef vacation_apply(request):\r\n    '''่ฏทๅ‡็”ณ่ฏทๅค„็†'''\r\n    try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    if request.method == 'POST':\r\n        kind = request.POST.get('kind')\r\n        reason = request.POST.get('reason')\r\n        startday = request.POST.get('startDay')\r\n        endday = request.POST.get('endDay')\r\n        user_obj = User.objects.get(pk=user_id)\r\n
        if Leave.objects.filter(staff_id=user_obj.staff.staff_id, startday__lte=startday, endday__gte=startday, statu__in=['unapproved','pass']):  # scope the overlap check to the requesting employee\r\n            return HttpResponse('ๆ‚จๅœจ{}ๅˆฐ{}ๅทฒๆœ‰่ฏทๅ‡ๅœจ็”ณ่ฏท'.format(startday, endday))\r\n        staff_id = user_obj.staff.staff_id\r\n        applytime = timezone.now()\r\n        statu = 'unapproved'\r\n        Leave.objects.create(kind=kind, reason=reason, startday=startday,\r\n                             endday=endday, staff_id=staff_id, applytime=applytime, statu=statu)\r\n        role = Role.objects.get(pk=user_obj.role_id).role_name\r\n
        if role == 'ๆ™ฎ้€šๅ‘˜ๅทฅ':\r\n            boss = Staff.objects.get(\r\n                section__section_id=user_obj.staff.section_id, user__role__role_name='้ƒจ้—จไธป็ฎก')\r\n            Message.objects.create(staff=boss, title='่ฏทๅ‡็”ณ่ฏท', content='ๆ‚จ็š„ๅ‘˜ๅทฅ{}็”ณ่ฏทๅœจ{}ๅˆฐ{}่ฏทๅ‡๏ผŒ่ฏทๅค„็†'.format(\r\n                user_obj.staff.staff_name, startday, endday))\r\n        elif role == '้ƒจ้—จไธป็ฎก':\r\n            boss = Staff.objects.filter(user__role__role_name='่ถ…็บง็ฎก็†ๅ‘˜')\r\n            for i in boss:\r\n                Message.objects.create(staff=i, title='่ฏทๅ‡็”ณ่ฏท', content='ๆ‚จ็š„ๅ‘˜ๅทฅ{}็”ณ่ฏทๅœจ{}ๅˆฐ{}่ฏทๅ‡๏ผŒ่ฏทๅค„็†'.format(\r\n                    user_obj.staff.staff_name, startday, endday))\r\n        return HttpResponse('ๆไบคๆˆๅŠŸ๏ผŒ็ญ‰ๅพ…ๅฎกๆ‰น')\r\n    return HttpResponseNotFound('404 not found')\r\n\r\n\r\ndef addresslist_show(request):\r\n    '''้€š่ฎฏๅฝ•้กต้ขๅฑ•็คบ'''\r\n    
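# unauthenticated visitors are bounced back to the login page\r\n    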
try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    return render(request, 'addresslist.html')\r\n\r\n\r\ndef addresslist(request):\r\n    '''้€š่ฎฏๅฝ•ๆŸฅ่ฏข'''\r\n    try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    page = int(request.GET['page'])\r\n    rows = int(request.GET['rows'])\r\n    user_obj = User.objects.exclude(\r\n        role_id=Role.objects.get(role_name='่ถ…็บง็ฎก็†ๅ‘˜').role_id)\r\n    staff_obj = Staff.objects.all()\r\n    section_obj = Section.objects.all()\r\n    position_obj = Position.objects.all()\r\n    rowslist = []\r\n
    for i in user_obj:\r\n        staff = staff_obj.get(staff_id=i.staff.staff_id)\r\n        idict = {\r\n            'user_name': i.user_name,\r\n            'staff_name': staff.staff_name,\r\n            'gender': humansee(staff.gender),\r\n            'Email': staff.Email,\r\n            'phone': staff.phone_num,\r\n            'section': section_obj.get(section_id=staff.section_id).section_name,\r\n            'position': position_obj.get(position_id=staff.position_id).position_name,\r\n        }\r\n        rowslist.append(idict)\r\n    total = len(user_obj)\r\n    res = {\r\n        \"total\": total,\r\n        \"rows\": rowslist[((page - 1) * rows):(page * rows)]\r\n    }\r\n    return JsonResponse(res)\r\n\r\n\r\n
def personal(request):\r\n    '''ไธชไบบไฟกๆฏๆ˜พ็คบ'''\r\n    try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    try:\r\n        user_obj = User.objects.get(pk=user_id)\r\n        staff = user_obj.staff\r\n        user_name = user_obj.user_name\r\n        section = Section.objects.get(section_id=staff.section_id).section_name\r\n        position = Position.objects.get(\r\n            position_id=staff.position_id).position_name\r\n        staff = staff\r\n        month = staff.birth.month\r\n        day = staff.birth.day\r\n        if month < 10:\r\n            month = '0' + str(month)\r\n        if day < 10:\r\n            day = '0' + str(day)\r\n        birth = '{}-{}-{}'.format(staff.birth.year,\r\n                                  month, day)\r\n        return render(request, 'personal.html', locals())\r\n    except Exception as e:\r\n        print(e)\r\n\r\n\r\n
def personal_submit(request):\r\n    '''ไธชไบบไฟกๆฏไฟฎๆ”น'''\r\n    try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    if request.method == 'POST':\r\n        staff = User.objects.get(pk=user_id).staff\r\n        staff_name = request.POST.get('staffName')\r\n        gender = request.POST.get('gender')\r\n        birth = request.POST.get('birth')\r\n        phone = request.POST.get('phone')\r\n        email = request.POST.get('email')\r\n        try:\r\n            photo = request.FILES['photo']\r\n            if photo:\r\n                staff.photo = photo\r\n        except KeyError:\r\n            pass\r\n        staff.staff_name = staff_name\r\n        staff.gender = gender\r\n        staff.birth = birth\r\n        staff.phone_num = phone\r\n        staff.Email = email\r\n        staff.save()\r\n        return redirect('personal')\r\n    return HttpResponseNotFound('404 not found')\r\n\r\n\r\n
def section_information_show(request):\r\n    '''้ƒจ้—จไฟกๆฏ้กต้ขๆ˜พ็คบ'''\r\n    try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    return render(request, 'section-information.html')\r\n\r\n\r\ndef section_information(request):\r\n    '''้ƒจ้—จไธป็ฎก็š„้ƒจ้—จไฟกๆฏ'''\r\n    try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    if request.method == 'GET':\r\n        page = int(request.GET['page'])\r\n        rows = int(request.GET['rows'])\r\n        user_obj = User.objects.get(pk=user_id)\r\n        if Role.objects.get(pk=user_obj.role_id).role_name == '้ƒจ้—จไธป็ฎก':\r\n            staff = Staff.objects.filter(\r\n                section=user_obj.staff.section).exclude(user=user_id)\r\n            rowlist = []\r\n            for 
i in staff:\r\n idict = {\r\n 'section': Section.objects.get(pk=i.section_id).section_name,\r\n 'user_name': i.user.user_name,\r\n 'staff_name': i.staff_name,\r\n 'gender': humansee(i.gender),\r\n 'position': Position.objects.get(pk=i.position_id).position_name,\r\n 'email': i.Email,\r\n 'phone': i.phone_num,\r\n 'degree': i.degree,\r\n 'school': i.school,\r\n 'hiredate': i.hiredate,\r\n }\r\n rowlist.append(idict)\r\n total = len(staff)\r\n res = {\r\n \"total\": total,\r\n \"rows\": rowlist[((page - 1) * rows):(page * rows)],\r\n }\r\n return JsonResponse(res)\r\n\r\n\r\ndef humansee(string):\r\n strdict = {\r\n \"pass\": \"้€š่ฟ‡\",\r\n \"refuse\": \"ๆ‹’็ป\",\r\n \"unapproved\": \"ๆœชๆ‰นๅค\",\r\n \"sthday\": \"ไบ‹ๅ‡\",\r\n \"illday\": \"็—…ๅ‡\",\r\n \"year\": \"ๅนดไผ‘\",\r\n 'male': '็”ท',\r\n 'female': 'ๅฅณ',\r\n }\r\n return strdict[string]\r\n\r\n\r\ndef approval(request, atype):\r\n '''ๅค„็†่ฏทๅ‡ๅค–ๅ‡บ่ฎฐๅฝ•ๆ˜พ็คบ'''\r\n\r\n def deal(a_value):\r\n rowlist = []\r\n for l in a_value:\r\n staff = Staff.objects.get(pk=l['staff_id'])\r\n ldict = {\r\n 'date': l['applytime'],\r\n 'user_name': User.objects.get(staff=staff).user_name,\r\n 'staff_name': staff.staff_name,\r\n 'section': Section.objects.get(pk=staff.section_id).section_name,\r\n 'position': Position.objects.get(pk=staff.position_id).position_name,\r\n 'reason': l['reason'],\r\n 'startday': l['startday'],\r\n 'endday': l['endday'],\r\n 'status': humansee(l['statu']),\r\n }\r\n if atype == Out:\r\n ldict['destination'] = l['destination']\r\n ldict['id'] = l['out_id']\r\n if atype == Leave:\r\n ldict['kind'] = humansee(l['kind'])\r\n ldict['id'] = l['leave_id']\r\n rowlist.append(ldict)\r\n total = len(a_value)\r\n res = {\r\n \"total\": total,\r\n \"rows\": rowlist[((page - 1) * rows):(page * rows)]\r\n }\r\n return res\r\n try:\r\n user_id = request.session['user_id']\r\n except KeyError:\r\n return redirect('login')\r\n if request.method == 'GET':\r\n user_obj = User.objects.get(pk=user_id)\r\n page = int(request.GET.get('page'))\r\n rows = int(request.GET.get('rows'))\r\n role = Role.objects.get(role_id=user_obj.role_id)\r\n if role.role_name == '้ƒจ้—จไธป็ฎก':\r\n a_value = atype.objects.filter(staff__section_id=user_obj.staff.section_id, statu='unapproved').exclude(\r\n staff=user_obj.staff).values()\r\n return deal(a_value)\r\n elif role.role_name == '่ถ…็บง็ฎก็†ๅ‘˜':\r\n a_value = atype.objects.filter(\r\n staff__user__role__role_name='้ƒจ้—จไธป็ฎก', statu='unapproved').values()\r\n return deal(a_value)\r\n return 403\r\n return 404\r\n\r\n\r\ndef vacation_approval_show(request):\r\n '''่ฏทๅ‡้กต้ขๆ˜พ็คบ'''\r\n try:\r\n user_id = request.session['user_id']\r\n except KeyError:\r\n return redirect('login')\r\n return render(request, 'qingjiacheck.html')\r\n\r\n\r\ndef vacation_approval(request):\r\n '''่ฏทๅ‡่ฎฐๅฝ•ๆŸฅ่ฏข'''\r\n res = approval(request, Leave)\r\n if type(res).__name__ == 'dict':\r\n return JsonResponse(res)\r\n elif res == 403:\r\n return HttpResponseNotAllowed('ๆ— ๆญคๆƒ้™')\r\n else:\r\n return HttpResponseNotFound('404 not found')\r\n\r\n\r\ndef leave_deal(request):\r\n '''่ฏทๅ‡ๅฎกๆ‰นๅค„็†'''\r\n try:\r\n user_id = request.session['user_id']\r\n except KeyError:\r\n return redirect('login')\r\n role = Role.objects.get(pk=User.objects.get(pk=user_id).role_id)\r\n if role.role_name == 'ๆ™ฎ้€šๅ‘˜ๅทฅ':\r\n return HttpResponseNotAllowed('ๆ— ๆญคๆƒ้™')\r\n if request.method == 'POST':\r\n ids = request.POST.get('ids')\r\n app = request.POST.get('app')\r\n ids = map(lambda x: int(x), 
ids.split())\r\n        exmess = ''\r\n        if app == 'pass':\r\n            for i in ids:\r\n                leave = Leave.objects.get(pk=i)\r\n                staff = Staff.objects.get(pk=leave.staff_id)\r\n                if leave.statu != 'unapproved':\r\n                    exmess += ' {}็š„่ฏทๅ‡ๅทฒ่ขซๅ…ถไป–ไบบๅค„็† '.format(staff.staff_name)\r\n                    continue\r\n                leave.statu = 'pass'\r\n                day = (leave.endday - leave.startday).days\r\n                vacation = Vacation.objects.get(staff_id=leave.staff_id)\r\n                if leave.kind == 'sthday':\r\n                    vacation.busyday += day\r\n                elif leave.kind == 'illday':\r\n                    vacation.illrest += day\r\n                elif leave.kind == 'year':\r\n                    if vacation.yearrest + day > vacation.yearday:\r\n                        return HttpResponse('{}็š„ๅนดๅ‡ไธ่ถณ!'.format(staff.staff_name))\r\n                    vacation.yearrest += day\r\n                else:\r\n                    return HttpResponseNotFound('404')\r\n                leave.save()\r\n                vacation.save()\r\n                Message.objects.create(staff_id=staff.staff_id, title='่ฏทๅ‡ๅฎกๆ‰นๅ›žๅค',\r\n                                       content='ๆ‚จไปŽ{}ๅˆฐ{}็š„่ฏทๅ‡ๅทฒ่ขซๆ‰นๅ‡†!!'.format(leave.startday, leave.endday))\r\n            return HttpResponse('ๆ“ไฝœๆˆๅŠŸ' + exmess)\r\n
        elif app == 'reject':\r\n            for i in ids:\r\n                leave = Leave.objects.get(pk=i)\r\n                staff = Staff.objects.get(pk=leave.staff_id)\r\n                if leave.statu != 'unapproved':\r\n                    exmess += ' {}็š„่ฏทๅ‡ๅทฒ่ขซๅ…ถไป–ไบบๅค„็† '.format(staff.staff_name)\r\n                    continue\r\n                Leave.objects.filter(pk=i).update(statu='refuse')  # reject per item after the check; the old bulk update ran first and exhausted the ids iterator\r\n                Message.objects.create(staff_id=staff.staff_id, title='่ฏทๅ‡ๅฎกๆ‰นๅ›žๅค',\r\n                                       content='ๆ‚จไปŽ{}ๅˆฐ{}็š„่ฏทๅ‡ๅทฒ่ขซๆ‹’็ป!!'.format(leave.startday, leave.endday))\r\n            return HttpResponse('ๆ“ไฝœๆˆๅŠŸ' + exmess)\r\n    return HttpResponseNotFound('404')\r\n\r\n\r\n
def out_approval_show(request):\r\n    '''ๅค–ๅ‡บ้กต้ขๆ˜พ็คบ'''\r\n    try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    return render(request, 'waichucheck.html')\r\n\r\n\r\ndef out_approval(request):\r\n    '''ๅค–ๅ‡บ่ฎฐๅฝ•ๆŸฅ่ฏข'''\r\n    res = approval(request, Out)\r\n    if type(res).__name__ == 'dict':\r\n        return JsonResponse(res)\r\n    elif res == 403:\r\n        return HttpResponseNotAllowed('ๆ— ๆญคๆƒ้™')\r\n    else:\r\n        return HttpResponseNotFound('404 not found')\r\n\r\n\r\n
def out_deal(request):\r\n    '''ๅค–ๅ‡บๅฎกๆ‰นๅค„็†'''\r\n    try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    role = Role.objects.get(pk=User.objects.get(pk=user_id).role_id)\r\n    if role.role_name == 'ๆ™ฎ้€šๅ‘˜ๅทฅ':\r\n        return HttpResponseNotAllowed('ๆ— ๆญคๆƒ้™')\r\n    if request.method == 'POST':\r\n        ids = request.POST.get('ids')\r\n        app = request.POST.get('app')\r\n        ids = map(lambda x: int(x), ids.split())\r\n        exmess = ''\r\n        if app == 'pass':\r\n            for i in ids:\r\n                out = Out.objects.get(pk=i)\r\n                staff = Staff.objects.get(pk=out.staff_id)\r\n                if out.statu != 'unapproved':\r\n                    exmess += ' {}็š„ๅค–ๅ‡บๅทฒ่ขซๅ…ถไป–ไบบๅค„็† '.format(staff.staff_name)\r\n                    continue\r\n                out.statu = 'pass'\r\n                day = (out.endday - out.startday).days\r\n                performance = Performance.objects.get(\r\n                    staff_id=staff, salaryMonth__year=out.startday.year, salaryMonth__month=out.startday.month)\r\n                performance.outday += day\r\n                Message.objects.create(staff_id=staff.staff_id, title='ๅค–ๅ‡บๅฎกๆ‰นๅ›žๅค',\r\n                                       content='ๆ‚จไปŽ{}ๅˆฐ{}็š„ๅค–ๅ‡บๅทฒ่ขซๆ‰นๅ‡†!!'.format(out.startday, out.endday))\r\n                out.save()\r\n                performance.save()\r\n            return HttpResponse('ๆ“ไฝœๆˆๅŠŸ' + exmess)\r\n        elif app == 'reject':\r\n            for i in ids:\r\n                out = Out.objects.get(pk=i)\r\n                staff = Staff.objects.get(pk=out.staff_id)\r\n                if out.statu != 'unapproved':\r\n                    exmess += ' {}็š„ๅค–ๅ‡บๅทฒ่ขซๅ…ถไป–ไบบๅค„็† '.format(staff.staff_name)\r\n                    
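# this record was already resolved by another approver, so skip it\r\n                    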
continue\r\n                Out.objects.filter(pk=i).update(statu='refuse')  # persist the rejection; the plain attribute assignment was never saved\r\n                Message.objects.create(staff_id=staff.staff_id, title='ๅค–ๅ‡บๅฎกๆ‰นๅ›žๅค',\r\n                                       content='ๆ‚จไปŽ{}ๅˆฐ{}็š„ๅค–ๅ‡บๅทฒ่ขซๆ‹’็ป!!'.format(out.startday, out.endday))\r\n            return HttpResponse('ๆ“ไฝœๆˆๅŠŸ' + exmess)\r\n    return HttpResponseNotFound('404')\r\n\r\n\r\ndef logout(request):\r\n    '''้€€ๅ‡บ็ณป็ปŸ'''\r\n    del request.session['user_id']\r\n    return render(request, 'login.html')\r\n\r\n\r\n
def showjob(request):\r\n    '''่ฟ”ๅ›ž่Œไฝ้กต้ข'''\r\n    try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    user_obj = User.objects.get(pk=user_id)\r\n    if Role.objects.get(role_id=user_obj.role_id).role_name == '่ถ…็บง็ฎก็†ๅ‘˜':\r\n        section = Section.objects.exclude(pk=user_obj.staff.section_id)\r\n        return render(request, 'job.html', {'section': section})\r\n    return HttpResponseNotAllowed('ๆ— ๆญคๆƒ้™')\r\n\r\n\r\n
def getjob(request):\r\n    '''่Žทๅ–่Œไฝๆ•ฐๆฎ'''\r\n    try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    user_obj = User.objects.get(pk=user_id)\r\n    if Role.objects.get(role_id=user_obj.role_id).role_name == '่ถ…็บง็ฎก็†ๅ‘˜':\r\n        page = request.GET['page']\r\n        rows = request.GET['rows']\r\n        Position_list = list(Position.objects.exclude(pk=user_obj.staff.position_id).values(\r\n            'position_id', 'position_name', 'section_id'))\r\n        for i in Position_list:\r\n            i[\"section_name\"] = Section.objects.get(\r\n                section_id=i[\"section_id\"]).section_name\r\n        total = len(Position_list)\r\n        p = int(page) - 1\r\n        r = int(rows)\r\n        row = Position_list[(p * r):(p * r + r)]  # bound the slice to one page instead of returning every remaining row\r\n        joblist = {\"total\": total, \"rows\": row}\r\n        return JsonResponse(joblist)\r\n    return HttpResponseNotAllowed('ๆ— ๆญคๆƒ้™')\r\n\r\n\r\n
def showsection(request):\r\n    '''่ฟ”ๅ›ž้ƒจ้—จ้กต้ข'''\r\n    try:\r\n        eval(\"request.session['user_id']\")\r\n    except KeyError:\r\n        return redirect('login')\r\n    return render(request, 'section.html')\r\n\r\n\r\ndef getsection(request):\r\n    '''่Žทๅ–้ƒจ้—จๆ•ฐๆฎ'''\r\n    try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    user_obj = User.objects.get(pk=user_id)\r\n    if Role.objects.get(role_id=user_obj.role_id).role_name == '่ถ…็บง็ฎก็†ๅ‘˜':\r\n        page = int(request.GET['page'])\r\n        rows = int(request.GET['rows'])\r\n        Section_list = list(Section.objects.exclude(pk=user_obj.staff.section_id).values())\r\n        total = len(Section_list)\r\n        row = Section_list[((page - 1) * rows):(page * rows)]\r\n        sectionlist = {\"total\": total, \"rows\": row}\r\n        return JsonResponse(sectionlist)\r\n    return HttpResponseNotAllowed('ๆ— ๆญคๆƒ้™')\r\n\r\n\r\n
def showstaff(request):\r\n    '''่ฟ”ๅ›žๅ‘˜ๅทฅ้กต้ข'''\r\n    try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    user_obj = User.objects.get(pk=user_id)\r\n    if Role.objects.get(pk=user_obj.role_id).role_name == '่ถ…็บง็ฎก็†ๅ‘˜':\r\n        role = Role.objects.exclude(role_name='่ถ…็บง็ฎก็†ๅ‘˜')\r\n        section = Section.objects.exclude(pk=user_obj.staff.section_id)\r\n        position = Position.objects.exclude(pk=user_obj.staff.position_id)\r\n        return render(request, 'staff.html',\r\n                      {'role': role, 'section': section, 'position': position})\r\n    return HttpResponseNotAllowed('ๆ— ๆญคๆƒ้™')\r\n\r\n\r\ndef getstaff(request):\r\n    '''่Žทๅ–ๅ‘˜ๅทฅๆ•ฐๆฎ'''\r\n    try:\r\n        user_id = request.session['user_id']\r\n    except KeyError:\r\n        return redirect('login')\r\n    page = int(request.GET['page'])\r\n    rows = int(request.GET['rows'])\r\n    if 
Role.objects.get(role_id=User.objects.get(pk=user_id).role_id).role_name == '่ถ…็บง็ฎก็†ๅ‘˜':\r\n Staff_list = list(Staff.objects.exclude(user__role__role_name='่ถ…็บง็ฎก็†ๅ‘˜').values(\r\n 'staff_id', 'staff_name', 'gender', 'birth', 'degree',\r\n 'Email', 'phone_num', 'hiredate', 'school', 'staff_remarks',\r\n 'position_id', 'section_id', 'user_id', 'address'))\r\n for i in Staff_list:\r\n userobj = User.objects.filter(user_id=i['user_id']).first()\r\n i['gender'] = Staff.objects.get(\r\n staff_id=i['staff_id']).get_gender_display()\r\n i['yearday'] = Vacation.objects.get(staff_id=i['staff_id']).yearday\r\n i['illday'] = Vacation.objects.get(staff_id=i['staff_id']).illday\r\n i['user_id'] = userobj.user_name\r\n i['role_name'] = Role.objects.get(\r\n role_id=userobj.role_id).role_name\r\n i['role_id'] = Role.objects.get(role_id=userobj.role_id).role_id\r\n i['staff_id'] = Performance.objects.get(\r\n staff_id=i['staff_id']).basepay\r\n i['section_name'] = Section.objects.filter(\r\n section_id=i['section_id']).first().section_name\r\n i['position_name'] = Position.objects.get(\r\n position_id=i['position_id']).position_name\r\n i['section_id'] = Section.objects.filter(\r\n section_id=i['section_id']).first().section_id\r\n i['position_id'] = Position.objects.get(\r\n position_id=i['position_id']).position_id\r\n total = len(Staff_list)\r\n row = Staff_list[((page - 1) * rows):(page * rows)]\r\n stafflist = {'total': total, 'rows': row}\r\n return JsonResponse(stafflist)\r\n return HttpResponseNotAllowed('ๆ— ๆญคๆƒ้™')\r\n\r\n\r\ndef showupdatepwd(request):\r\n '''่ฟ”ๅ›žไฟฎๆ”นๅฏ†็ ้กต้ข'''\r\n try:\r\n eval(\"request.session['user_id']\")\r\n except KeyError:\r\n return redirect('login')\r\n return render(request, 'updatepwd.html')\r\n\r\n\r\ndef getupdatepwd(request):\r\n '''ไฟฎๆ”นๅฏ†็ '''\r\n try:\r\n eval(\"request.session['user_id']\")\r\n except KeyError:\r\n return redirect('login')\r\n if request.method == 'POST':\r\n opwd = request.POST.get('opwd')\r\n npwd = request.POST.get('npwd')\r\n getuser = User.objects.get(user_id=request.session['user_id'])\r\n pwd = getuser.user_passwd\r\n if check_password(opwd,pwd):\r\n npwd = make_password(npwd)\r\n getuser.user_passwd = npwd\r\n getuser.save()\r\n return HttpResponse('ไฟฎๆ”นๆˆๅŠŸ')\r\n return HttpResponse('ๅŽŸๅฏ†็ ่พ“ๅ…ฅไธๆญฃ็กฎ๏ผ')\r\n\r\n\r\ndef shownotice(request):\r\n '''่ฟ”ๅ›žๅ‘้€้€š็Ÿฅ้กต้ข'''\r\n try:\r\n eval(\"request.session['user_id']\")\r\n except KeyError:\r\n return redirect('login')\r\n return render(request, 'notice.html')\r\n\r\n\r\ndef sendnotice(request):\r\n '''ๅ‘้€้€š็Ÿฅ'''\r\n try:\r\n user_id = request.session['user_id']\r\n except KeyError:\r\n return redirect('login')\r\n try:\r\n if request.method == 'POST':\r\n title = request.POST.get('title')\r\n content = request.POST.get('content')\r\n staff = request.POST.get('staff')\r\n superior = request.POST.get('superior')\r\n isstaff = request.POST.get('isstaff')\r\n issuperior = request.POST.get('issuperior')\r\n if isstaff == 'false' and issuperior == 'false':\r\n return HttpResponse('ๆœช้€‰ๆ‹ฉๆŽฅๆ”ถไบบ๏ผ๏ผ')\r\n str_ = ''\r\n if isstaff == 'true':\r\n num1 = Role.objects.get(role_name=str(staff)).role_id\r\n str_ += str(num1) + ' '\r\n if issuperior == 'true':\r\n num2 = Role.objects.get(role_name=str(superior)).role_id\r\n str_ += str(num2) + ' '\r\n noticeobj = Notice(title=title, content=content,\r\n receiver=str_, sender=User.objects.get(pk=user_id).staff)\r\n noticeobj.save()\r\n except Exception as e:\r\n print('error:',e)\r\n return 
HttpResponse('ๅ‘้€ๅคฑ่ดฅ')\r\n else:\r\n return HttpResponse('ๅ‘้€ๆˆๅŠŸ')\r\n\r\n\r\ndef addstaff(request):\r\n '''ๆทปๅŠ ๅ‘˜ๅทฅไฟกๆฏ'''\r\n try:\r\n eval(\"request.session['user_id']\")\r\n except KeyError:\r\n return redirect('login')\r\n try:\r\n if request.method == 'POST':\r\n for key, value in request.POST.items():\r\n if not value and key != 'staff_remarks' and key != 'csrfmiddlewaretoken':\r\n return HttpResponse('ไบฒ๏ผŒไธๅกซๆ•ฐๆฎๆƒณๅ•ฅๅ‘ข๏ผ', status=400)\r\n staffnum = request.POST.get('user_id')\r\n for u in User.objects.values('user_name'):\r\n if staffnum == u['user_name']:\r\n return HttpResponse('ๅทฒๅญ˜ๅœจ่ฏฅๅทฅๅท', status=400)\r\n userpwd = request.POST.get('user_passwd')\r\n role_id = request.POST.get('role_id')\r\n section = request.POST.get('section_id')\r\n salary = request.POST.get('staff_id')\r\n job = request.POST.get('position_id')\r\n yearday = request.POST.get('yearday')\r\n illday = request.POST.get('illday')\r\n print(type(yearday))\r\n if int(yearday) < 0 or int(yearday)>100:\r\n return HttpResponse('ๅนดๅ‡ๅกซๅ†™ๆœ‰่ฏฏ!', status=400)\r\n if int(illday) < 0 or int(illday)>100:\r\n return HttpResponse('็—…ๅ‡ๅกซๅ†™ๆœ‰่ฏฏ!', status=400) \r\n degree = request.POST.get('degree')\r\n school = request.POST.get('school')\r\n staffname = request.POST.get('staff_name')\r\n gender = request.POST.get('gender')\r\n hiredate = request.POST.get('hiredate')\r\n birth = request.POST.get('birth')\r\n if datetime.datetime.strptime(hiredate, '%Y-%m-%d') > datetime.datetime.today():\r\n return HttpResponse('ๅ…ฅ่Œๆ—ฅๆœŸๅกซๅ†™ๆœ‰่ฏฏ', status=400)\r\n if datetime.datetime.strptime(birth, '%Y-%m-%d') > datetime.datetime.today():\r\n return HttpResponse('ๅ‡บ็”Ÿๆ—ฅๆœŸๅกซๅ†™ๆœ‰่ฏฏ', status=400)\r\n phonenum = request.POST.get('phone_num')\r\n if len(phonenum) != 11:\r\n return HttpResponse('็”ต่ฏๅท็ ๅฟ…้กปไธบ11ไฝ!', status=400)\r\n Email = request.POST.get('Email')\r\n for e in Staff.objects.values('Email'):\r\n if Email == e['Email']:\r\n return HttpResponse('ๅทฒๅญ˜ๅœจ่ฏฅ้‚ฎ็ฎฑ', status=400)\r\n remarks = request.POST.get('staff_remarks')\r\n address = request.POST.get(\"address\")\r\n userpwd = make_password(userpwd)\r\n with transaction.atomic():\r\n u = User(user_name=staffnum, user_passwd=userpwd,\r\n role_id=role_id)\r\n u.save()\r\n s = Staff(staff_name=staffname, gender=gender, birth=birth,\r\n address=address, degree=degree, Email=Email,\r\n phone_num=phonenum, hiredate=hiredate, school=school,\r\n staff_remarks=remarks, section_id=section,\r\n position_id=job, user_id=u.user_id)\r\n s.save()\r\n sec = Section.objects.get(pk=section)\r\n sec.section_num += 1\r\n sec.save()\r\n p = Performance(\r\n basepay=salary, onday=0, offday=0,\r\n addday=0, outday=0, staff_id=s.staff_id)\r\n p.save()\r\n v = Vacation(yearday=yearday, illday=illday,\r\n staff_id=s.staff_id)\r\n v.save()\r\n except Exception as e:\r\n print(e)\r\n return HttpResponse('ๆทปๅŠ ๅคฑ่ดฅ')\r\n else:\r\n return HttpResponse('ๆทปๅŠ ๆˆๅŠŸ')\r\n\r\n\r\ndef updstaff(request):\r\n '''ไฟฎๆ”นๅ‘˜ๅทฅไฟกๆฏ'''\r\n try:\r\n eval(\"request.session['user_id']\")\r\n except KeyError:\r\n return redirect('login')\r\n try:\r\n if request.method == 'POST':\r\n for key, value in request.POST.items():\r\n if not value and key != 'staff_remarks' and key != 'csrfmiddlewaretoken' and key != 'user_passwd':\r\n return HttpResponse('ไบฒ๏ผŒไธๅกซๆ•ฐๆฎๆƒณๅ•ฅๅ‘ข๏ผ')\r\n staffnum = request.POST.get('user_id')\r\n userpwd = request.POST.get('user_passwd')\r\n role_id = request.POST.get('role_id')\r\n section = 
request.POST.get('section_id')\r\n            salary = request.POST.get('staff_id')\r\n            job = request.POST.get('position_id')\r\n            yearday = request.POST.get('yearday')\r\n            illday = request.POST.get('illday')\r\n            remarks = request.POST.get('staff_remarks')\r\n            user_obj = User.objects.filter(user_name=staffnum).first()\r\n            staff_obj = user_obj.staff\r\n            with transaction.atomic():\r\n                if userpwd:\r\n                    userpwd = make_password(userpwd)  # default hasher with a random salt; the old call passed 'md5' as the salt argument\r\n                    user_obj.user_passwd = userpwd\r\n                user_obj.role_id = role_id\r\n                user_obj.save()\r\n                section_obj1 = Section.objects.filter(\r\n                    section_id=staff_obj.section_id).first()\r\n                section_obj1.section_num -= 1\r\n                section_obj1.save()\r\n                section_obj2 = Section.objects.filter(\r\n                    section_id=section).first()\r\n                section_obj2.section_num += 1\r\n                section_obj2.save()\r\n                staff_obj.staff_remarks = remarks\r\n                staff_obj.section_id = section\r\n                staff_obj.position_id = job\r\n                staff_obj.save()\r\n                Performance.objects.filter(\r\n                    staff_id=staff_obj, salaryMonth__year=timezone.now().year,\r\n                    salaryMonth__month=timezone.now().month).update(basepay=salary)\r\n                Vacation.objects.filter(staff_id=staff_obj).update(\r\n                    yearday=yearday, illday=illday)\r\n                Message.objects.create(\r\n                    staff=staff_obj, title='ไธชไบบไฟกๆฏไฟฎๆ”น', content='ๆ‚จ็š„ไธชไบบไฟกๆฏๅทฒ่ขซไฟฎๆ”น๏ผŒ่ฏทๅˆฐไธชไบบไฟกๆฏ้กต้ขๆŸฅ็œ‹๏ผ')\r\n    except Exception as e:\r\n        print(e)\r\n        return HttpResponse('ไฟฎๆ”นๅคฑ่ดฅ')\r\n    else:\r\n        return HttpResponse('ไฟฎๆ”นๆˆๅŠŸ')\r\n\r\n\r\n
def delstaff(request):\r\n    '''ๅˆ ้™คๅ‘˜ๅทฅไฟกๆฏ'''\r\n    try:\r\n        eval(\"request.session['user_id']\")\r\n    except KeyError:\r\n        return redirect('login')\r\n    try:\r\n        if request.method == 'POST':\r\n            staffnum = request.POST.get('user_name')\r\n            with transaction.atomic():\r\n                user_obj = User.objects.filter(user_name=staffnum).first()\r\n                staff_obj = user_obj.staff\r\n                section_obj = Section.objects.filter(\r\n                    pk=staff_obj.section_id).first()\r\n                section_obj.section_num -= 1\r\n                section_obj.save()\r\n                user_obj.delete()\r\n    except Exception as e:\r\n        print(e)\r\n        return HttpResponse('ๅˆ ้™คๅคฑ่ดฅ')\r\n    else:\r\n        return HttpResponse('ๅˆ ้™คๆˆๅŠŸ')\r\n\r\n\r\n
def addsection(request):\r\n    '''ๅขžๅŠ ้ƒจ้—จไฟกๆฏ'''\r\n    try:\r\n        eval(\"request.session['user_id']\")\r\n    except KeyError:\r\n        return redirect('login')\r\n    try:\r\n        if request.method == 'POST':\r\n            for key, value in request.POST.items():\r\n                if not value and key != 'csrfmiddlewaretoken' and key != 'section_remarks':\r\n                    return HttpResponse('ไบฒ๏ผŒไธๅกซๆ•ฐๆฎๆƒณๅ•ฅๅ‘ข๏ผ')\r\n            # section_id = request.POST.get('section_id')\r\n            section_name = request.POST.get('section_name')\r\n            section_remarks = request.POST.get('section_remarks')\r\n            print(section_remarks)\r\n            with transaction.atomic():\r\n                s = Section(\r\n                    section_name=section_name, section_remarks=section_remarks)\r\n                s.save()\r\n    except Exception as e:\r\n        print(e)\r\n        return HttpResponse('ๆทปๅŠ ๅคฑ่ดฅ')\r\n    else:\r\n        return HttpResponse('ๆทปๅŠ ๆˆๅŠŸ')\r\n\r\n\r\n
def updsection(request):\r\n    '''ไฟฎๆ”น้ƒจ้—จไฟกๆฏ'''\r\n    try:\r\n        eval(\"request.session['user_id']\")\r\n    except KeyError:\r\n        return redirect('login')\r\n    try:\r\n        if request.method == 'POST':\r\n            for key, value in request.POST.items():\r\n                if not value and key != 'csrfmiddlewaretoken' and key != 'section_remarks':\r\n                    return HttpResponse('ไบฒ๏ผŒไธๅกซๆ•ฐๆฎๆƒณๅ•ฅๅ‘ข๏ผ')\r\n            section_id = request.POST.get('section_id')\r\n            section_name = request.POST.get('section_name')\r\n            section_remarks = request.POST.get('section_remarks')\r\n            print(request.POST)\r\n            with 
transaction.atomic():\r\n Section.objects.filter(section_id=section_id).update(\r\n section_name=section_name, section_remarks=section_remarks)\r\n except Exception as e:\r\n print(e)\r\n return HttpResponse('ไฟฎๆ”นๅคฑ่ดฅ')\r\n else:\r\n return HttpResponse('ไฟฎๆ”นๆˆๅŠŸ')\r\n\r\n\r\ndef delsection(request):\r\n '''ๅˆ ้™ค้ƒจ้—จไฟกๆฏ'''\r\n try:\r\n eval(\"request.session['user_id']\")\r\n except KeyError:\r\n return redirect('login')\r\n try:\r\n if request.method == 'POST':\r\n section_id = request.POST.get('section_id')\r\n with transaction.atomic():\r\n Section.objects.filter(section_id=section_id).delete()\r\n except Exception as e:\r\n print(e)\r\n return HttpResponse('ๅˆ ้™คๅคฑ่ดฅ')\r\n else:\r\n return HttpResponse('ๅˆ ้™คๆˆๅŠŸ')\r\n\r\n\r\ndef addjob(request):\r\n '''ๅขžๅŠ ่Œไฝไฟกๆฏ'''\r\n try:\r\n eval(\"request.session['user_id']\")\r\n except KeyError:\r\n return redirect('login')\r\n try:\r\n if request.method == 'POST':\r\n for key, value in request.POST.items():\r\n if not value and key != 'csrfmiddlewaretoken' and key != 'position_id':\r\n return HttpResponse('ไบฒ๏ผŒไธๅกซๆ•ฐๆฎๆƒณๅ•ฅๅ‘ข๏ผ')\r\n position_name = request.POST.get('position_name')\r\n section_id = request.POST.get('section_id')\r\n with transaction.atomic():\r\n p = Position(position_name=position_name,\r\n section_id=section_id)\r\n p.save()\r\n except Exception as e:\r\n print(e)\r\n return HttpResponse('ๆทปๅŠ ๅคฑ่ดฅ')\r\n else:\r\n return HttpResponse('ๆทปๅŠ ๆˆๅŠŸ')\r\n\r\n\r\ndef updjob(request):\r\n '''ไฟฎๆ”น่Œไฝไฟกๆฏ'''\r\n try:\r\n eval(\"request.session['user_id']\")\r\n except KeyError:\r\n return redirect('login')\r\n try:\r\n if request.method == 'POST':\r\n for key, value in request.POST.items():\r\n if not value and key != 'csrfmiddlewaretoken':\r\n return HttpResponse('ไบฒ๏ผŒไธๅกซๆ•ฐๆฎๆƒณๅ•ฅๅ‘ข๏ผ')\r\n position_id = request.POST.get('position_id')\r\n position_name = request.POST.get('position_name')\r\n section_id = request.POST.get('section_id')\r\n print(request.POST)\r\n with transaction.atomic():\r\n Position.objects.filter(position_id=position_id).update(\r\n position_name=position_name, section_id=section_id)\r\n except Exception as e:\r\n print(e)\r\n return HttpResponse('ไฟฎๆ”นๅคฑ่ดฅ')\r\n else:\r\n return HttpResponse('ไฟฎๆ”นๆˆๅŠŸ')\r\n\r\n\r\ndef deljob(request):\r\n '''ๅˆ ้™ค่Œไฝไฟกๆฏ'''\r\n try:\r\n eval(\"request.session['user_id']\")\r\n except KeyError:\r\n return redirect('login')\r\n try:\r\n if request.method == 'POST':\r\n position_id = request.POST.get('position_id')\r\n boo = Staff.objects.filter(position_id=position_id).first()\r\n if(boo):\r\n return HttpResponse('ไบฒ๏ผŒไธ่ƒฝ่ฟ™ๆ ทๅญๅˆ ้™คๆ•ฐๆฎๅ“ฆ๏ผ')\r\n else:\r\n with transaction.atomic():\r\n Position.objects.filter(position_id=position_id).delete()\r\n\r\n except Exception as e:\r\n print(e)\r\n return HttpResponse('ๅˆ ้™คๅคฑ่ดฅ')\r\n else:\r\n return HttpResponse('ๅˆ ้™คๆˆๅŠŸ')\r\n", "sub_path": "mscgproject/user/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 46582, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "models.User.objects.get", "line_number": 21, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 21, "usage_type": "name"}, {"api_name": "django.contrib.auth.hashers.check_password", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Notice.objects.filter", "line_number": 25, 
"usage_type": "call"}, {"api_name": "models.Notice.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "models.Notice", "line_number": 25, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 26, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Message.objects.filter", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 27, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 28, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 28, "usage_type": "call"}, {"api_name": "models.Role.objects.get", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Role.objects.get", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 33, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Role.objects.get", "line_number": 37, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 37, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 40, "usage_type": "call"}, {"api_name": "models.User.DoesNotExist", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 41, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 42, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 50, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 59, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 66, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 66, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 70, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 70, "usage_type": "name"}, {"api_name": "models.User.DoesNotExist", "line_number": 71, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 71, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 72, "usage_type": "call"}, {"api_name": "models.Performance.objects.filter", "line_number": 73, "usage_type": "call"}, {"api_name": "models.Performance.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "models.Performance", "line_number": 73, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 85, "usage_type": "call"}, 
{"api_name": "django.shortcuts.redirect", "line_number": 93, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 94, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 94, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 94, "usage_type": "name"}, {"api_name": "models.Vacation.objects.get", "line_number": 95, "usage_type": "call"}, {"api_name": "models.Vacation.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "models.Vacation", "line_number": 95, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 130, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 138, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 139, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 139, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 139, "usage_type": "name"}, {"api_name": "models.Punch.objects.get", "line_number": 141, "usage_type": "call"}, {"api_name": "models.Punch.objects", "line_number": 141, "usage_type": "attribute"}, {"api_name": "models.Punch", "line_number": 141, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 141, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 141, "usage_type": "attribute"}, {"api_name": "django.http.HttpResponse", "line_number": 142, "usage_type": "call"}, {"api_name": "models.Punch.DoesNotExist", "line_number": 143, "usage_type": "attribute"}, {"api_name": "models.Punch", "line_number": 143, "usage_type": "name"}, {"api_name": "models.Performance.objects.get", "line_number": 145, "usage_type": "call"}, {"api_name": "models.Performance.objects", "line_number": 145, "usage_type": "attribute"}, {"api_name": "models.Performance", "line_number": 145, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 145, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 145, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 146, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 146, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 147, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 147, "usage_type": "name"}, {"api_name": "models.Punch.objects.create", "line_number": 148, "usage_type": "call"}, {"api_name": "models.Punch.objects", "line_number": 148, "usage_type": "attribute"}, {"api_name": "models.Punch", "line_number": 148, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 148, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 148, "usage_type": "name"}, {"api_name": "models.Punch.objects.create", "line_number": 151, "usage_type": "call"}, {"api_name": "models.Punch.objects", "line_number": 151, "usage_type": "attribute"}, {"api_name": "models.Punch", "line_number": 151, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 151, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 151, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 154, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotFound", "line_number": 155, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 162, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 163, "usage_type": "call"}, {"api_name": 
"django.shortcuts.redirect", "line_number": 171, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 174, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 174, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 174, "usage_type": "name"}, {"api_name": "models.Vacation.objects.get", "line_number": 175, "usage_type": "call"}, {"api_name": "models.Vacation.objects", "line_number": 175, "usage_type": "attribute"}, {"api_name": "models.Vacation", "line_number": 175, "usage_type": "name"}, {"api_name": "models.Performance.objects.filter", "line_number": 176, "usage_type": "call"}, {"api_name": "models.Performance.objects", "line_number": 176, "usage_type": "attribute"}, {"api_name": "models.Performance", "line_number": 176, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 212, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 220, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 221, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 229, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 233, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 233, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 234, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 234, "usage_type": "attribute"}, {"api_name": "models.User.objects.get", "line_number": 237, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 237, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 237, "usage_type": "name"}, {"api_name": "models.Performance.objects.get", "line_number": 238, "usage_type": "call"}, {"api_name": "models.Performance.objects", "line_number": 238, "usage_type": "attribute"}, {"api_name": "models.Performance", "line_number": 238, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 239, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 239, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 242, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 250, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 251, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 259, "usage_type": "call"}, {"api_name": "models.Out.objects.filter", "line_number": 265, "usage_type": "call"}, {"api_name": "models.Out.objects", "line_number": 265, "usage_type": "attribute"}, {"api_name": "models.Out", "line_number": 265, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 266, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 267, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 267, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 267, "usage_type": "name"}, {"api_name": "models.Out.objects.create", "line_number": 268, "usage_type": "call"}, {"api_name": "models.Out.objects", "line_number": 268, "usage_type": "attribute"}, {"api_name": "models.Out", "line_number": 268, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 271, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 271, "usage_type": "name"}, {"api_name": "models.Role.objects.get", "line_number": 272, "usage_type": "call"}, {"api_name": 
"models.Role.objects", "line_number": 272, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 272, "usage_type": "name"}, {"api_name": "models.Staff.objects.get", "line_number": 274, "usage_type": "call"}, {"api_name": "models.Staff.objects", "line_number": 274, "usage_type": "attribute"}, {"api_name": "models.Staff", "line_number": 274, "usage_type": "name"}, {"api_name": "models.Message.objects.create", "line_number": 276, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 276, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 276, "usage_type": "name"}, {"api_name": "models.Staff.objects.filter", "line_number": 279, "usage_type": "call"}, {"api_name": "models.Staff.objects", "line_number": 279, "usage_type": "attribute"}, {"api_name": "models.Staff", "line_number": 279, "usage_type": "name"}, {"api_name": "models.Message.objects.create", "line_number": 281, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 281, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 281, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 283, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotFound", "line_number": 284, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 292, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 293, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 301, "usage_type": "call"}, {"api_name": "models.Leave.objects.filter", "line_number": 307, "usage_type": "call"}, {"api_name": "models.Leave.objects", "line_number": 307, "usage_type": "attribute"}, {"api_name": "models.Leave", "line_number": 307, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 308, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 309, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 309, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 309, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 311, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 311, "usage_type": "name"}, {"api_name": "models.Leave.objects.create", "line_number": 313, "usage_type": "call"}, {"api_name": "models.Leave.objects", "line_number": 313, "usage_type": "attribute"}, {"api_name": "models.Leave", "line_number": 313, "usage_type": "name"}, {"api_name": "models.Role.objects.get", "line_number": 315, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 315, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 315, "usage_type": "name"}, {"api_name": "models.Staff.objects.get", "line_number": 317, "usage_type": "call"}, {"api_name": "models.Staff.objects", "line_number": 317, "usage_type": "attribute"}, {"api_name": "models.Staff", "line_number": 317, "usage_type": "name"}, {"api_name": "models.Message.objects.create", "line_number": 319, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 319, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 319, "usage_type": "name"}, {"api_name": "models.Staff.objects.filter", "line_number": 322, "usage_type": "call"}, {"api_name": "models.Staff.objects", "line_number": 322, "usage_type": "attribute"}, {"api_name": "models.Staff", "line_number": 322, "usage_type": "name"}, {"api_name": "models.Message.objects.create", 
"line_number": 324, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 324, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 324, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 326, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotFound", "line_number": 327, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 335, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 336, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 344, "usage_type": "call"}, {"api_name": "models.User.objects.exclude", "line_number": 347, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 347, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 347, "usage_type": "name"}, {"api_name": "models.Role.objects.get", "line_number": 348, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 348, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 348, "usage_type": "name"}, {"api_name": "models.Staff.objects.all", "line_number": 349, "usage_type": "call"}, {"api_name": "models.Staff.objects", "line_number": 349, "usage_type": "attribute"}, {"api_name": "models.Staff", "line_number": 349, "usage_type": "name"}, {"api_name": "models.Section.objects.all", "line_number": 350, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 350, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 350, "usage_type": "name"}, {"api_name": "models.Position.objects.all", "line_number": 351, "usage_type": "call"}, {"api_name": "models.Position.objects", "line_number": 351, "usage_type": "attribute"}, {"api_name": "models.Position", "line_number": 351, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 370, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 378, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 380, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 380, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 380, "usage_type": "name"}, {"api_name": "models.Section.objects.get", "line_number": 383, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 383, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 383, "usage_type": "name"}, {"api_name": "models.Position.objects.get", "line_number": 384, "usage_type": "call"}, {"api_name": "models.Position.objects", "line_number": 384, "usage_type": "attribute"}, {"api_name": "models.Position", "line_number": 384, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 395, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 405, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 407, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 407, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 407, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 425, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotFound", "line_number": 426, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 434, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 435, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", 
"line_number": 443, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 447, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 447, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 447, "usage_type": "name"}, {"api_name": "models.Role.objects.get", "line_number": 448, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 448, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 448, "usage_type": "name"}, {"api_name": "models.Staff.objects.filter", "line_number": 449, "usage_type": "call"}, {"api_name": "models.Staff.objects", "line_number": 449, "usage_type": "attribute"}, {"api_name": "models.Staff", "line_number": 449, "usage_type": "name"}, {"api_name": "models.Section.objects.get", "line_number": 454, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 454, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 454, "usage_type": "name"}, {"api_name": "models.Position.objects.get", "line_number": 458, "usage_type": "call"}, {"api_name": "models.Position.objects", "line_number": 458, "usage_type": "attribute"}, {"api_name": "models.Position", "line_number": 458, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 471, "usage_type": "call"}, {"api_name": "models.Staff.objects.get", "line_number": 494, "usage_type": "call"}, {"api_name": "models.Staff.objects", "line_number": 494, "usage_type": "attribute"}, {"api_name": "models.Staff", "line_number": 494, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 497, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 497, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 497, "usage_type": "name"}, {"api_name": "models.Section.objects.get", "line_number": 499, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 499, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 499, "usage_type": "name"}, {"api_name": "models.Position.objects.get", "line_number": 500, "usage_type": "call"}, {"api_name": "models.Position.objects", "line_number": 500, "usage_type": "attribute"}, {"api_name": "models.Position", "line_number": 500, "usage_type": "name"}, {"api_name": "models.Out", "line_number": 506, "usage_type": "name"}, {"api_name": "models.Leave", "line_number": 509, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 522, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 524, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 524, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 524, "usage_type": "name"}, {"api_name": "models.Role.objects.get", "line_number": 527, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 527, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 527, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 545, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 546, "usage_type": "call"}, {"api_name": "models.Leave", "line_number": 551, "usage_type": "argument"}, {"api_name": "django.http.JsonResponse", "line_number": 553, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotAllowed", "line_number": 555, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotFound", "line_number": 557, "usage_type": "call"}, {"api_name": 
"django.shortcuts.redirect", "line_number": 565, "usage_type": "call"}, {"api_name": "models.Role.objects.get", "line_number": 566, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 566, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 566, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 566, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 566, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 566, "usage_type": "name"}, {"api_name": "django.http.HttpResponseNotAllowed", "line_number": 568, "usage_type": "call"}, {"api_name": "models.Leave.objects.get", "line_number": 576, "usage_type": "call"}, {"api_name": "models.Leave.objects", "line_number": 576, "usage_type": "attribute"}, {"api_name": "models.Leave", "line_number": 576, "usage_type": "name"}, {"api_name": "models.Staff.objects.get", "line_number": 577, "usage_type": "call"}, {"api_name": "models.Staff.objects", "line_number": 577, "usage_type": "attribute"}, {"api_name": "models.Staff", "line_number": 577, "usage_type": "name"}, {"api_name": "models.Vacation.objects.get", "line_number": 583, "usage_type": "call"}, {"api_name": "models.Vacation.objects", "line_number": 583, "usage_type": "attribute"}, {"api_name": "models.Vacation", "line_number": 583, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 590, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotFound", "line_number": 593, "usage_type": "call"}, {"api_name": "models.Message.objects.create", "line_number": 596, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 596, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 596, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 598, "usage_type": "call"}, {"api_name": "models.Leave.objects.filter", "line_number": 600, "usage_type": "call"}, {"api_name": "models.Leave.objects", "line_number": 600, "usage_type": "attribute"}, {"api_name": "models.Leave", "line_number": 600, "usage_type": "name"}, {"api_name": "models.Leave.objects.get", "line_number": 602, "usage_type": "call"}, {"api_name": "models.Leave.objects", "line_number": 602, "usage_type": "attribute"}, {"api_name": "models.Leave", "line_number": 602, "usage_type": "name"}, {"api_name": "models.Staff.objects.get", "line_number": 603, "usage_type": "call"}, {"api_name": "models.Staff.objects", "line_number": 603, "usage_type": "attribute"}, {"api_name": "models.Staff", "line_number": 603, "usage_type": "name"}, {"api_name": "models.Message.objects.create", "line_number": 607, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 607, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 607, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 609, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotFound", "line_number": 610, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 618, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 619, "usage_type": "call"}, {"api_name": "models.Out", "line_number": 624, "usage_type": "argument"}, {"api_name": "django.http.JsonResponse", "line_number": 626, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotAllowed", "line_number": 628, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotFound", "line_number": 630, "usage_type": "call"}, 
{"api_name": "django.shortcuts.redirect", "line_number": 638, "usage_type": "call"}, {"api_name": "models.Role.objects.get", "line_number": 639, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 639, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 639, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 639, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 639, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 639, "usage_type": "name"}, {"api_name": "django.http.HttpResponseNotAllowed", "line_number": 641, "usage_type": "call"}, {"api_name": "models.Out.objects.get", "line_number": 649, "usage_type": "call"}, {"api_name": "models.Out.objects", "line_number": 649, "usage_type": "attribute"}, {"api_name": "models.Out", "line_number": 649, "usage_type": "name"}, {"api_name": "models.Staff.objects.get", "line_number": 650, "usage_type": "call"}, {"api_name": "models.Staff.objects", "line_number": 650, "usage_type": "attribute"}, {"api_name": "models.Staff", "line_number": 650, "usage_type": "name"}, {"api_name": "models.Performance.objects.get", "line_number": 656, "usage_type": "call"}, {"api_name": "models.Performance.objects", "line_number": 656, "usage_type": "attribute"}, {"api_name": "models.Performance", "line_number": 656, "usage_type": "name"}, {"api_name": "models.Message.objects.create", "line_number": 659, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 659, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 659, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 663, "usage_type": "call"}, {"api_name": "models.Out.objects.get", "line_number": 666, "usage_type": "call"}, {"api_name": "models.Out.objects", "line_number": 666, "usage_type": "attribute"}, {"api_name": "models.Out", "line_number": 666, "usage_type": "name"}, {"api_name": "models.Staff.objects.get", "line_number": 667, "usage_type": "call"}, {"api_name": "models.Staff.objects", "line_number": 667, "usage_type": "attribute"}, {"api_name": "models.Staff", "line_number": 667, "usage_type": "name"}, {"api_name": "models.Message.objects.create", "line_number": 672, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 672, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 672, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 674, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotFound", "line_number": 675, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 681, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 689, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 690, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 690, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 690, "usage_type": "name"}, {"api_name": "models.Role.objects.get", "line_number": 691, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 691, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 691, "usage_type": "name"}, {"api_name": "models.Section.objects.exclude", "line_number": 692, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 692, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 692, "usage_type": "name"}, {"api_name": 
"django.shortcuts.render", "line_number": 693, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotAllowed", "line_number": 694, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 702, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 703, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 703, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 703, "usage_type": "name"}, {"api_name": "models.Role.objects.get", "line_number": 704, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 704, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 704, "usage_type": "name"}, {"api_name": "models.Position.objects.exclude", "line_number": 707, "usage_type": "call"}, {"api_name": "models.Position.objects", "line_number": 707, "usage_type": "attribute"}, {"api_name": "models.Position", "line_number": 707, "usage_type": "name"}, {"api_name": "models.Section.objects.get", "line_number": 710, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 710, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 710, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 717, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotAllowed", "line_number": 718, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 726, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 727, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 735, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 736, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 736, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 736, "usage_type": "name"}, {"api_name": "models.Role.objects.get", "line_number": 737, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 737, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 737, "usage_type": "name"}, {"api_name": "models.Section.objects.exclude", "line_number": 740, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 740, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 740, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 744, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotAllowed", "line_number": 745, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 753, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 754, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 754, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 754, "usage_type": "name"}, {"api_name": "models.Role.objects.get", "line_number": 755, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 755, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 755, "usage_type": "name"}, {"api_name": "models.Role.objects.exclude", "line_number": 756, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 756, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 756, "usage_type": "name"}, {"api_name": "models.Section.objects.exclude", "line_number": 757, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 757, "usage_type": "attribute"}, {"api_name": 
"models.Section", "line_number": 757, "usage_type": "name"}, {"api_name": "models.Position.objects.exclude", "line_number": 758, "usage_type": "call"}, {"api_name": "models.Position.objects", "line_number": 758, "usage_type": "attribute"}, {"api_name": "models.Position", "line_number": 758, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 759, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotAllowed", "line_number": 761, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 769, "usage_type": "call"}, {"api_name": "models.Role.objects.get", "line_number": 772, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 772, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 772, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 772, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 772, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 772, "usage_type": "name"}, {"api_name": "models.Staff.objects.exclude", "line_number": 773, "usage_type": "call"}, {"api_name": "models.Staff.objects", "line_number": 773, "usage_type": "attribute"}, {"api_name": "models.Staff", "line_number": 773, "usage_type": "name"}, {"api_name": "models.User.objects.filter", "line_number": 778, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 778, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 778, "usage_type": "name"}, {"api_name": "models.Staff.objects.get", "line_number": 779, "usage_type": "call"}, {"api_name": "models.Staff.objects", "line_number": 779, "usage_type": "attribute"}, {"api_name": "models.Staff", "line_number": 779, "usage_type": "name"}, {"api_name": "models.Vacation.objects.get", "line_number": 781, "usage_type": "call"}, {"api_name": "models.Vacation.objects", "line_number": 781, "usage_type": "attribute"}, {"api_name": "models.Vacation", "line_number": 781, "usage_type": "name"}, {"api_name": "models.Vacation.objects.get", "line_number": 782, "usage_type": "call"}, {"api_name": "models.Vacation.objects", "line_number": 782, "usage_type": "attribute"}, {"api_name": "models.Vacation", "line_number": 782, "usage_type": "name"}, {"api_name": "models.Role.objects.get", "line_number": 784, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 784, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 784, "usage_type": "name"}, {"api_name": "models.Role.objects.get", "line_number": 786, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 786, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 786, "usage_type": "name"}, {"api_name": "models.Performance.objects.get", "line_number": 787, "usage_type": "call"}, {"api_name": "models.Performance.objects", "line_number": 787, "usage_type": "attribute"}, {"api_name": "models.Performance", "line_number": 787, "usage_type": "name"}, {"api_name": "models.Section.objects.filter", "line_number": 789, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 789, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 789, "usage_type": "name"}, {"api_name": "models.Position.objects.get", "line_number": 791, "usage_type": "call"}, {"api_name": "models.Position.objects", "line_number": 791, "usage_type": "attribute"}, {"api_name": "models.Position", "line_number": 791, "usage_type": "name"}, {"api_name": 
"models.Section.objects.filter", "line_number": 793, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 793, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 793, "usage_type": "name"}, {"api_name": "models.Position.objects.get", "line_number": 795, "usage_type": "call"}, {"api_name": "models.Position.objects", "line_number": 795, "usage_type": "attribute"}, {"api_name": "models.Position", "line_number": 795, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 800, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotAllowed", "line_number": 801, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 809, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 810, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 818, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 822, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 822, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 822, "usage_type": "name"}, {"api_name": "django.contrib.auth.hashers.check_password", "line_number": 824, "usage_type": "call"}, {"api_name": "django.contrib.auth.hashers.make_password", "line_number": 825, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 828, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 829, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 837, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 838, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 846, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 856, "usage_type": "call"}, {"api_name": "models.Role.objects.get", "line_number": 859, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 859, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 859, "usage_type": "name"}, {"api_name": "models.Role.objects.get", "line_number": 862, "usage_type": "call"}, {"api_name": "models.Role.objects", "line_number": 862, "usage_type": "attribute"}, {"api_name": "models.Role", "line_number": 862, "usage_type": "name"}, {"api_name": "models.Notice", "line_number": 864, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 865, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 865, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 865, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 869, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 871, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 879, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 884, "usage_type": "call"}, {"api_name": "models.User.objects.values", "line_number": 886, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 886, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 886, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 888, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 898, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 900, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 907, 
"usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 907, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 907, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 908, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 909, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 909, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 909, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 910, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 913, "usage_type": "call"}, {"api_name": "models.Staff.objects.values", "line_number": 915, "usage_type": "call"}, {"api_name": "models.Staff.objects", "line_number": 915, "usage_type": "attribute"}, {"api_name": "models.Staff", "line_number": 915, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 917, "usage_type": "call"}, {"api_name": "django.contrib.auth.hashers.make_password", "line_number": 920, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 921, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 921, "usage_type": "name"}, {"api_name": "models.User", "line_number": 922, "usage_type": "call"}, {"api_name": "models.Staff", "line_number": 925, "usage_type": "call"}, {"api_name": "models.Section.objects.get", "line_number": 931, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 931, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 931, "usage_type": "name"}, {"api_name": "models.Performance", "line_number": 934, "usage_type": "call"}, {"api_name": "models.Vacation", "line_number": 938, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 943, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 945, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 953, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 958, "usage_type": "call"}, {"api_name": "models.User.objects.filter", "line_number": 968, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 968, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 968, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 970, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 970, "usage_type": "name"}, {"api_name": "django.contrib.auth.hashers.make_password", "line_number": 972, "usage_type": "call"}, {"api_name": "models.Section.objects.filter", "line_number": 976, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 976, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 976, "usage_type": "name"}, {"api_name": "models.Section.objects.filter", "line_number": 980, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 980, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 980, "usage_type": "name"}, {"api_name": "models.Performance.objects.filter", "line_number": 988, "usage_type": "call"}, {"api_name": "models.Performance.objects", "line_number": 988, "usage_type": "attribute"}, {"api_name": "models.Performance", "line_number": 988, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 989, "usage_type": "call"}, {"api_name": 
"django.utils.timezone", "line_number": 989, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 990, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 990, "usage_type": "name"}, {"api_name": "models.Vacation.objects.filter", "line_number": 991, "usage_type": "call"}, {"api_name": "models.Vacation.objects", "line_number": 991, "usage_type": "attribute"}, {"api_name": "models.Vacation", "line_number": 991, "usage_type": "name"}, {"api_name": "models.Message.objects.create", "line_number": 993, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 993, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 993, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 997, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 999, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 1007, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 1011, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 1011, "usage_type": "name"}, {"api_name": "models.User.objects.filter", "line_number": 1012, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 1012, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 1012, "usage_type": "name"}, {"api_name": "models.Section.objects.filter", "line_number": 1014, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 1014, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 1014, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 1021, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1023, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 1031, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1036, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 1041, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 1041, "usage_type": "name"}, {"api_name": "models.Section", "line_number": 1042, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1047, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1049, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 1057, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1062, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 1067, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 1067, "usage_type": "name"}, {"api_name": "models.Section.objects.filter", "line_number": 1068, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 1068, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 1068, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 1072, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1074, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 1082, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 1086, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 1086, "usage_type": "name"}, {"api_name": "models.Section.objects.filter", "line_number": 1087, "usage_type": "call"}, {"api_name": 
"models.Section.objects", "line_number": 1087, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 1087, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 1090, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1092, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 1100, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1105, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 1108, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 1108, "usage_type": "name"}, {"api_name": "models.Position", "line_number": 1109, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1114, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1116, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 1124, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1129, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 1134, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 1134, "usage_type": "name"}, {"api_name": "models.Position.objects.filter", "line_number": 1135, "usage_type": "call"}, {"api_name": "models.Position.objects", "line_number": 1135, "usage_type": "attribute"}, {"api_name": "models.Position", "line_number": 1135, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 1139, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1141, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 1149, "usage_type": "call"}, {"api_name": "models.Staff.objects.filter", "line_number": 1153, "usage_type": "call"}, {"api_name": "models.Staff.objects", "line_number": 1153, "usage_type": "attribute"}, {"api_name": "models.Staff", "line_number": 1153, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 1155, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 1157, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 1157, "usage_type": "name"}, {"api_name": "models.Position.objects.filter", "line_number": 1158, "usage_type": "call"}, {"api_name": "models.Position.objects", "line_number": 1158, "usage_type": "attribute"}, {"api_name": "models.Position", "line_number": 1158, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 1162, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 1164, "usage_type": "call"}]} {"seq_id": "202109938", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Report on the current list of projects\"\"\"\n\nimport copy\nfrom docutils import nodes\nfrom docutils.parsers import rst\nfrom docutils import statemachine\nimport os\nfrom sphinx.util import logging\nfrom sphinx.util.nodes import nested_parse_with_titles\nimport yaml\nimport yamlordereddictloader\n\n\nLOG = logging.getLogger(__name__)\n\nIRC_LOG_URL_BASE = 'http://eavesdrop.openstack.org/irclogs/%23'\n\n_projects_yaml = {}\n\n\ndef _get_project_data():\n \"\"\"Return a copy of the project data.\"\"\"\n return copy.deepcopy(_projects_yaml)\n\n\ndef _load_project_file(filename='reference/tsc/projects.yaml'):\n with open(filename, 'r', encoding='utf-8') as f:\n return yaml.load(\n f.read(),\n Loader=yamlordereddictloader.Loader,\n )\n\n\ndef _project_to_rst(name, info):\n\n if 'service' in info:\n title = \"{0} ({1})\".format(name.title(), info['service'])\n elif name == 'I18n':\n title = name\n else:\n title = name.title()\n\n yield '.. _project-%s:' % _slugify(name)\n yield ''\n yield '=' * len(title)\n yield title\n yield '=' * len(title)\n yield ''\n yield ':Home Page: ' + info.get('url', '')\n tl = info.get('tl', {'name': '', 'irc': '', 'email': ''})\n yield ':Technical Lead: %(name)s (``%(irc)s``) <%(email)s>' % tl\n pl = info.get('pl', {'name': '', 'irc': '', 'email': ''})\n yield ':Project Lead: %(name)s (``%(irc)s``) <%(email)s>' % pl\n irc_channel = info.get('irc-channel')\n if irc_channel:\n yield ':IRC Channel: `#%s <%s%s>`__' % (\n irc_channel, IRC_LOG_URL_BASE, irc_channel)\n service = info.get('service')\n if service:\n yield ':Service: ' + service\n yield ''\n mission = info.get('mission', '').rstrip()\n if mission:\n yield \"Mission\"\n yield '-------'\n yield ''\n yield mission\n yield ''\n yield 'Deliverables'\n yield '------------'\n yield ''\n deliverables = info.get('deliverables', [])\n if deliverables:\n for repo_name, deliverable in deliverables.items():\n yield repo_name\n yield '~' * len(repo_name)\n yield ''\n yield ':Repositories: ' + ', '.join(\n ':repo:`%s`' % repo\n for repo in deliverable.get('repos', [])\n )\n yield ''\n tags = deliverable.get('tags', [])\n if tags:\n yield ':Tags:'\n yield ''\n for tag in tags:\n yield ' - :ref:`tag-%s`' % tag\n yield ''\n else:\n yield 'None'\n yield ''\n\n\ndef _slugify(name):\n \"\"\"Convert name to slug form for references.\"\"\"\n return name.lower().replace(' ', '-')\n\n\ndef _write_project_pages(app):\n all_projects = _get_project_data()\n files = []\n for project, info in all_projects.items():\n LOG.info(\"project: %s\" % project)\n slug = _slugify(project)\n filename = 'reference/tsc/projects/%s.rst' % slug\n LOG.info('generating project page for %s' % project)\n with open(filename, 'w', encoding='utf-8') as f:\n f.write('\\n'.join(_project_to_rst(project, info)))\n files.append(filename)\n return files\n\n\nclass ProjectListDirective(rst.Directive):\n\n has_content = False\n\n def run(self):\n all_projects = _get_project_data()\n\n # Build the view of the data to be parsed for rendering.\n result = statemachine.ViewList()\n for project_name in sorted(all_projects.keys()):\n project_info = all_projects[project_name]\n for line in _project_to_rst(project_name, project_info):\n result.append(line, '<' + __name__ + '>')\n\n # Parse what we have into a new section.\n node = nodes.section()\n node.document = self.state.document\n nested_parse_with_titles(self.state, result, node)\n\n return 
node.children\n\n\ndef setup(app):\n    global _projects_yaml\n\n    LOG.info('loading projects extension')\n    app.add_directive('projectlist', ProjectListDirective)\n\n    filename = os.path.abspath('reference/tsc/projects.yaml')\n    LOG.info('reading %s' % filename)\n    _projects_yaml = _load_project_file(filename)\n    _write_project_pages(app)\n", "sub_path": "doc/source/_exts/projects.py", "file_name": "projects.py", "file_ext": "py", "file_size_in_byte": 4787, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "sphinx.util.logging.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "sphinx.util.logging", "line_number": 26, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 35, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 40, "usage_type": "call"}, {"api_name": "yamlordereddictloader.Loader", "line_number": 42, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.Directive", "line_number": 126, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst", "line_number": 126, "usage_type": "name"}, {"api_name": "docutils.statemachine.ViewList", "line_number": 134, "usage_type": "call"}, {"api_name": "docutils.statemachine", "line_number": 134, "usage_type": "name"}, {"api_name": "docutils.nodes.section", "line_number": 141, "usage_type": "call"}, {"api_name": "docutils.nodes", "line_number": 141, "usage_type": "name"}, {"api_name": "sphinx.util.nodes.nested_parse_with_titles", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}]} {"seq_id": "224252722", "text": "from __future__ import with_statement, division\nimport xml.etree.ElementTree as et\nfrom xml.dom import minidom\nimport struct\nimport array\nimport sys\nimport re\nimport os\nfrom imctools.io.imcacquisitionbase import ImcAcquisitionBase\nfrom imctools.io.abstractparserbase import AbstractParserBase\nfrom imctools.io.mcdxmlparser import McdXmlParser\nimport imctools.io.mcdxmlparser as mcdmeta\nfrom imctools.io.abstractparserbase import AcquisitionError\n\nfrom collections import defaultdict\n\n\"\"\"\nMain class\n\"\"\"\n\nclass McdParserBase(AbstractParserBase):\n    \"\"\"Parsing data from Fluidigm MCD files\n\n    The McdParser object should be closed using the close method.\n    :param filename: path to the .mcd file to parse\n    :param filehandle: optional open binary file handle, used instead of filename\n    \"\"\"\n\n    def __init__(self, filename, filehandle = None, metafilename=None):\n        \"\"\"\n\n        :param filename: path to the .mcd file\n        \"\"\"\n        AbstractParserBase.__init__(self)\n\n        if filehandle is None:\n            self._fh = open(filename, mode='rb')\n        else:\n            self._fh = filehandle\n\n        if metafilename is None:\n            self._metafh = self._fh\n        else:\n            self._metafh = open(metafilename, mode='rb')\n        self._xml = None\n        self._ns = None\n        self._acquisition_dict = None\n        self.retrieve_mcd_xml()\n        self.parse_mcd_xml()\n\n    @property\n    def filename(self):\n        \"Return the name of the open file\"\n        return self._fh.name\n\n    @property\n    def n_acquisitions(self):\n        \"\"\"\n        Number of acquisitions in the file\n        :return:\n        \"\"\"\n        return len(self.meta.get_acquisitions())\n\n    @property\n    def acquisition_ids(self):\n        \"\"\"\n        Acquisition IDs\n        :return:\n        \"\"\"\n        return list(self.meta.get_acquisitions().keys())\n\n    def get_acquisition_description(self, ac_id, default=None):\n        \"\"\"\n        Get the description field of the acquisition\n        :param ac_id:\n        :return:\n        \"\"\"\n        acmeta = self.meta.get_acquisition_meta(ac_id)\n        desc = 
acmeta.get(mcdmeta.DESCRIPTION, default)\n        return desc\n\n    def get_acquisition_buffer(self, ac_id):\n        \"\"\"\n        Returns the raw buffer for the acquisition\n        :param ac_id: the acquisition id\n        :return: the acquisition buffer\n        \"\"\"\n        f = self._fh\n        ac = self.meta.get_acquisitions()[ac_id]\n        data_offset_start = ac.data_offset_start\n        data_offset_end = ac.data_offset_end\n        data_size = ac.data_size\n        n_rows = ac.data_nrows\n        n_channel = ac.n_channels\n        f.seek(data_offset_start)\n        buffer = f.read(data_size)\n        return buffer\n\n    def get_acquisition_rawdata(self, ac_id):\n        \"\"\"\n        Get the raw data of the acquisition with the given id\n        :param ac_id: the acquisition id\n        :return: the raw acquisition data as a list of rows\n        \"\"\"\n        f = self._fh\n\n        ac = self.meta.get_acquisitions()[ac_id]\n        data_offset_start = ac.data_offset_start\n        data_offset_end = ac.data_offset_end\n        data_size = ac.data_size\n        n_rows = ac.data_nrows\n        n_channel = ac.n_channels\n        if n_rows == 0:\n            raise AcquisitionError('Acquisition ' + ac_id + ' empty!')\n\n        f.seek(data_offset_start)\n        dat = array.array('f')\n        dat.fromfile(f, (n_rows * n_channel))\n        if sys.byteorder != 'little':\n            dat.byteswap()\n        data = [dat[(row * n_channel):((row * n_channel) + n_channel)]\n                for row in range(n_rows)]\n        return data\n\n    def _inject_imc_datafile(self, filename):\n        \"\"\"\n        This function is used in cases where the MCD file is corrupted (missing MCD schema)\n        but there is a MCD schema file available. In this case the .schema file can\n        be loaded with the mcdparser and then the corrupted mcd-data file loaded\n        using this function. This will replace the mcd file data in the backend (containing only\n        the schema data) with the real mcd file (not containing the mcd xml).\n        \"\"\"\n        self.close()\n        self._fh = open(filename, mode='rb')\n\n\n    def get_nchannels_acquisition(self, ac_id):\n        \"\"\"\n        Get the number of channels in an acquisition\n        :param ac_id:\n        :return:\n        \"\"\"\n        ac = self.meta.get_acquisitions()[ac_id]\n        return ac.n_channels\n\n    def get_acquisition_channels(self, ac_id):\n        \"\"\"\n        Returns a dict with the channel metadata\n        :param ac_id: acquisition ID\n        :return: dict with key: channel_nr, value: (channel_name, channel_label)\n        \"\"\"\n        ac = self.meta.get_acquisitions()[ac_id]\n        channel_dict = ac.get_channel_orderdict()\n        return channel_dict\n\n    def parse_mcd_xml(self):\n        \"\"\"\n        Parse the mcd xml into a metadata object\n        \"\"\"\n        self._meta = McdXmlParser(self.xml)\n\n    @property\n    def meta(self):\n        return self._meta\n\n\n    @property\n    def xml(self):\n        return self._xml\n\n    def retrieve_mcd_xml(self, start_str='>> McdParserBase._add_nullbytes('abc')\n        'a\\\\x00b\\\\x00c\\\\x00'\n        \"\"\"\n        pad_str = ''\n        for s in buffer_str:\n            pad_str += s + '\\x00'\n        return pad_str\n\n    @staticmethod\n    def _reverse_find_in_buffer(f, s, buffer_size=4096):\n        # based on http://stackoverflow.com/questions/3893885/cheap-way-to-search-a-large-text-file-for-a-string\n        f.seek(0, 2)\n\n        buf = None\n        overlap = len(s) - 1\n        bsize = buffer_size +overlap+1\n        cur_pos = f.tell() - bsize+1\n        offset = (-2*bsize+overlap)\n        first_start=True\n        while cur_pos >= 0:\n            f.seek(cur_pos)\n            buf = f.read(bsize)\n            if buf:\n                pos = buf.find(s)\n                if pos >= 0:\n                    return f.tell() - (len(buf) - pos)\n\n            cur_pos = f.tell() +offset\n            if (cur_pos < 0) and first_start:\n                first_start=False\n                cur_pos=0\n        return -1\n", "sub_path": "imctools/io/mcdparserbase.py", "file_name": "mcdparserbase.py", "file_ext": "py", "file_size_in_byte": 13101, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "84", "api": [{"api_name": "imctools.io.abstractparserbase.AbstractParserBase", "line_number": 21, "usage_type": "name"}, {"api_name": "imctools.io.abstractparserbase.AbstractParserBase.__init__", "line_number": 34, "usage_type": "call"}, {"api_name": "imctools.io.abstractparserbase.AbstractParserBase", "line_number": 34, "usage_type": "name"}, {"api_name": "imctools.io.mcdxmlparser.DESCRIPTION", "line_number": 79, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser", "line_number": 79, "usage_type": "name"}, {"api_name": "imctools.io.abstractparserbase.AcquisitionError", "line_number": 114, "usage_type": "call"}, {"api_name": "array.array", "line_number": 117, "usage_type": "call"}, {"api_name": "sys.byteorder", "line_number": 119, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser.McdXmlParser", "line_number": 160, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 202, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree", "line_number": 203, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.replace", "line_number": 203, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 205, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 205, "usage_type": "argument"}, {"api_name": "imctools.io.mcdxmlparser.ACQUISITION", "line_number": 218, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser", "line_number": 218, "usage_type": "name"}, {"api_name": "imctools.io.imcacquisitionbase.ImcAcquisitionBase", "line_number": 220, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.tostring", "line_number": 226, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 226, "usage_type": "name"}, {"api_name": "imctools.io.mcdxmlparser.PANORAMA", "line_number": 238, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser", "line_number": 238, "usage_type": "name"}, {"api_name": "imctools.io.mcdxmlparser.IMAGESTARTOFFSET", "line_number": 239, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser", "line_number": 239, "usage_type": "name"}, {"api_name": "imctools.io.mcdxmlparser.IMAGEENDOFFSET", "line_number": 240, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser", "line_number": 240, "usage_type": "name"}, {"api_name": "imctools.io.mcdxmlparser.IMAGEFORMAT", "line_number": 245, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser", "line_number": 245, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 254, "usage_type": "call"}, {"api_name": "os.path", "line_number": 254, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser.SLIDE", "line_number": 262, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser", "line_number": 262, "usage_type": "name"}, {"api_name": "imctools.io.mcdxmlparser.IMAGESTARTOFFSET", "line_number": 263, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser", "line_number": 263, "usage_type": "name"}, {"api_name": "imctools.io.mcdxmlparser.IMAGEENDOFFSET", "line_number": 264, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser", "line_number": 264, "usage_type": "name"}, {"api_name": "imctools.io.mcdxmlparser.IMAGEFILE", "line_number": 265, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser", "line_number": 265, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 269, "usage_type": "call"}, {"api_name": "os.path", "line_number": 269, 
"usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 284, "usage_type": "call"}, {"api_name": "os.path", "line_number": 284, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser.BEFOREABLATIONIMAGESTARTOFFSET", "line_number": 290, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser", "line_number": 290, "usage_type": "name"}, {"api_name": "imctools.io.mcdxmlparser.BEFOREABLATIONIMAGEENDOFFSET", "line_number": 291, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser", "line_number": 291, "usage_type": "name"}, {"api_name": "imctools.io.mcdxmlparser.AFTERABLATIONIMAGESTARTOFFSET", "line_number": 298, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser", "line_number": 298, "usage_type": "name"}, {"api_name": "imctools.io.mcdxmlparser.AFTERABLATIONIMAGEENDOFFSET", "line_number": 299, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser", "line_number": 299, "usage_type": "name"}, {"api_name": "imctools.io.mcdxmlparser.ACQUISITION", "line_number": 308, "usage_type": "attribute"}, {"api_name": "imctools.io.mcdxmlparser", "line_number": 308, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 319, "usage_type": "call"}, {"api_name": "os.path", "line_number": 319, "usage_type": "attribute"}, {"api_name": "imctools.io.abstractparserbase.AcquisitionError", "line_number": 334, "usage_type": "name"}]} {"seq_id": "359277393", "text": "import time\n\nimport pandas as pd\nimport re\n\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom lmf.dbv2 import db_write\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import NoSuchElementException,StaleElementReferenceException\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport requests\nimport json\n\nfrom lch.zhulong import est_tbs,est_meta,est_html\n\n# __conp=[\"postgres\",\"since2015\",\"192.168.3.171\",\"hunan\",\"changsha\"]\n\n# #\n# url=\"http://www.bzztb.gov.cn/BZWZ/jyxx/003001/003001001/003001001001/003001001001001/\"\n# driver=webdriver.Chrome()\n# driver.minimize_window()\n# driver.get(url)\n#\n\n_name_='daqing'\n\ndef f1(driver,num):\n locator = (By.XPATH, '//ul[@class=\"notice-list lf-list1\"]/form/li[1]/a')\n WebDriverWait(driver, 10).until(EC.presence_of_element_located(locator))\n\n cnum = driver.find_element_by_xpath('//div[@class=\"pagination\"]/a[@class=\"current\"]').text\n\n if cnum != str(num):\n url = driver.current_url\n if num ==1:\n url = url.rsplit('/', maxsplit=1)[0] + '/index.htm'\n else:\n url = url.rsplit('/', maxsplit=1)[0] + '/index_' + str(num) + '.htm'\n\n val = driver.find_element_by_xpath('//ul[@class=\"notice-list lf-list1\"]/form/li[1]/a').text\n\n\n driver.get(url)\n\n locator = (By.XPATH, '//ul[@class=\"notice-list lf-list1\"]/form/li[1]/a[not(contains(string(),\"%s\"))]' % val)\n WebDriverWait(driver, 10).until(EC.presence_of_element_located(locator))\n\n data = []\n\n html = driver.page_source\n soup = BeautifulSoup(html, 'lxml')\n div = soup.find('div', class_='infor-con2 on')\n divs = div.find_all('li')\n\n for li in divs:\n href = li.a['href']\n name = li.a.get_text()\n ggstart_time = li.span.get_text()\n if 'http' in href:\n href = href\n else:\n href = 'http://zfcgzx.daqing.gov.cn' + href\n tmp = [name, ggstart_time, href]\n\n data.append(tmp)\n \n df=pd.DataFrame(data=data)\n 
df[\"info\"] = None\n return df\n\n\ndef f2(driver):\n locator = (By.XPATH, '//ul[@class=\"notice-list lf-list1\"]/form/li[1]/a')\n WebDriverWait(driver, 10).until(EC.presence_of_element_located(locator))\n\n page = driver.find_element_by_xpath('//div[@class=\"pagination\"]/a[last()]').get_attribute('href')\n\n page = re.findall('index_(\\d+).htm', page)[0]\n total = int(page)\n driver.quit()\n\n return total\n\n\ndef f3(driver, url):\n driver.get(url)\n\n locator = (\n By.XPATH, '//div[@class=\"main\"] | //div[@class=\"tab_content\"] | //div[@class=\"officialDoc\"] |'\n ' //table[@class=\"printTable\"] | //div[@class=\"WordSection1\"] | //div[@class=\"content\"] | /html/body/div')\n\n WebDriverWait(driver, 10).until(EC.presence_of_element_located(locator))\n\n before = len(driver.page_source)\n time.sleep(0.1)\n after = len(driver.page_source)\n i = 0\n while before != after:\n before = len(driver.page_source)\n time.sleep(0.1)\n after = len(driver.page_source)\n i += 1\n if i > 5: break\n\n page = driver.page_source\n\n soup = BeautifulSoup(page, 'lxml')\n\n div = soup.find('div', class_='tab_content')\n if div == None:\n div = soup.find('div', class_=\"home-detail\")\n if div == None:\n div = soup.find('div', class_=\"officialDoc\")\n if div == None:\n div = soup.find('table',class_=\"printTable\")\n if div == None:\n div=soup.find('div',class_='WordSection1')\n if div == None:\n div=soup.find('div',class_=\"content\")\n if div == None:\n div = soup.find('body').find('div',class_=None,recursive=False)\n\n if div == None:\n raise ValueError\n\n if 'IPๅทฒ็ป่ฟ‡ไบ†ๆœ‰ๆ•ˆๆœŸ' in div:\n raise TimeoutError\n\n\n\n return div\n\n\n\n\ndata=[\n [\"gcjs_zhaobiao_gg\",\"http://zfcgzx.daqing.gov.cn/jyxxJsgcZbgg/index.htm\",[\"name\",\"ggstart_time\",\"href\",\"info\"],f1,f2],\n [\"gcjs_chengqingbiangeng_gg\",\"http://zfcgzx.daqing.gov.cn/jyxxJsgcBgcggg/index.htm\",[\"name\",\"ggstart_time\",\"href\",\"info\"],f1,f2],\n ###ๅŒ…ๅซไธญๆ ‡๏ผŒไธญๆ ‡ๅ€™้€‰ไบบ๏ผŒๆ”พๅผƒไธญๆ ‡\n [\"gcjs_zhong_gg\",\"http://zfcgzx.daqing.gov.cn/jyxxJsgcZbgs/index.htm\",[\"name\",\"ggstart_time\",\"href\",\"info\"],f1,f2],\n\n [\"zfcg_zhaobiao_gg\",\"http://zfcgzx.daqing.gov.cn/jyxxZfcgCggg/index.htm\",[\"name\",\"ggstart_time\",\"href\",\"info\"],f1,f2],\n [\"zfcg_zhongbiaohx_gg\",\"http://zfcgzx.daqing.gov.cn/jyxxZfcgYzbgg/index.htm\",[\"name\",\"ggstart_time\",\"href\",\"info\"],f1,f2],\n [\"zfcg_zhongbiao_gg\",\"http://zfcgzx.daqing.gov.cn/jyxxZfcgZbgg/index.htm\",[\"name\",\"ggstart_time\",\"href\",\"info\"],f1,f2],\n\n [\"zfcg_liubiao_gg\",\"http://zfcgzx.daqing.gov.cn/jyxxZfcgFbgg/index.htm\",[\"name\",\"ggstart_time\",\"href\",\"info\"],f1,f2],\n\n\n]\n\ndef work(conp,**args):\n est_meta(conp,data=data,diqu=\"้ป‘้พ™ๆฑŸ็œๅคงๅบ†ๅธ‚\",**args)\n est_html(conp,f=f3,**args)\n\nif __name__=='__main__':\n conp = [\"postgres\", \"since2015\", \"192.168.3.171\", \"heilongjiang\", \"daqing\"]\n # conp = [\"postgres\", \"since2015\", \"192.168.3.171\", \"test\", \"lch\"]\n\n work(conp=conp)", "sub_path": "all/heilongjiang/daqing.py", "file_name": "daqing.py", "file_ext": "py", "file_size_in_byte": 5414, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 32, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 32, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 33, "usage_type": "call"}, {"api_name": 
"selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 33, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 33, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 49, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 49, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 50, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 50, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 50, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 71, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 77, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 77, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 78, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 78, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 78, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 82, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 93, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 93, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 96, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 96, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 96, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 99, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 104, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 111, "usage_type": "call"}, {"api_name": "lch.zhulong.est_meta", "line_number": 156, "usage_type": "call"}, {"api_name": "lch.zhulong.est_html", "line_number": 157, "usage_type": "call"}]} {"seq_id": "594427782", "text": "from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n\t\turl(r'^$', views.home, name='home'),\n\t\turl(r'^prepareExam/$', views.prepareExam, name='prepareExam'),\n\t\turl(r'^submitNewExamForm/$', views.submitNewExamForm, name='submitNewExamForm'),\n\t\turl(r'^play/$', views.play, name='play'),\n\t\turl(r'^stop/$', views.stop, name='stop'),\n\t\turl(r'^nodecount/$', views.nodeCount, name='nodecount'),\n\t\turl(r'^nodestatus/$', views.nodeStatus, name='nodestatus'),\n]", "sub_path": "AdminTools/src/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}]} {"seq_id": "228514143", "text": "import json\nimport re\nimport time\nimport urllib.parse as urlparser\nfrom pathlib import Path\nfrom pprint import pprint\nfrom time import sleep\nfrom warnings import warn\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\n\n\nclass PageParser:\n \"\"\"\n Page parsing for the different domains lives in this class;\n finished parser methods are registered in the method table in __init__.\n \"\"\"\n\n def __init__(self):\n self.parse_method_table = {\n # \"www.example.com\": PageParser.method\n \"www.channelnewsasia.com\": self.www_channelnewsasia_com,\n \"www.straitstimes.com\": self.www_straitstimes_com\n\n }\n\n def __call__(self, domain, text) -> dict:\n parse_method = self.parse_method_table.get(domain)\n if not parse_method:\n # warn(\"No parser method for pages from domain {}!\".format(domain))\n parse_method = self.www_default_com\n\n try:\n return parse_method(text)\n except Exception:\n warn(\"Failed to parse page content: {}\".format(domain))\n return {\n \"Type\": \"\",\n \"Time\": time.strftime(\"%Y-%m-%d\", time.gmtime()),\n \"Headline\": \"\",\n \"Text\": \"\",\n \"Section\": \"\",\n \"Writers\": \"\",\n \"URL\": \"\",\n \"MainKeyWord\": \"\",\n \"AdditionalKeyWord\": \"\"\n }\n\n def www_default_com(self, text) -> dict:\n result = {\n \"Type\": \"\",\n \"Time\": time.strftime(\"%Y-%m-%d\", time.gmtime()),\n \"Headline\": \"\",\n \"Text\": \"\",\n \"Section\": \"\",\n \"Writers\": \"\",\n \"URL\": \"\",\n \"MainKeyWord\": \"\",\n \"AdditionalKeyWord\": \"\"\n }\n soup = BeautifulSoup(text, \"lxml\")\n text = \"\"\n ps = soup.find_all(\"p\")\n for p in ps:\n text += p.text\n result[\"Headline\"] = soup.title.string\n result[\"Text\"] = text\n return result\n\n def www_straitstimes_com(self, text) -> dict:\n result = {\n \"Type\": \"\",\n \"Time\": time.strftime(\"%Y-%m-%d\", time.gmtime()),\n \"Headline\": \"\",\n \"Text\": \"\",\n \"Section\": \"\",\n \"Writers\": \"\",\n \"MainKeyWord\": \"\",\n \"AdditionalKeyWord\": \"\"\n }\n soup = BeautifulSoup(text, \"lxml\")\n date = soup.find(\"li\", {\"class\": \"story-postdate\"})\n if date is not None and date.contents is not None:\n result[\"Time\"] = date.contents[1]\n result[\"Headline\"] = soup.title.string\n result[\"Text\"] = \"\".join([i.text for i in soup.find_all(\"p\")])\n return result\n\n def www_channelnewsasia_com(self, text) -> dict:\n result = {\n \"Type\": \"\",\n \"Time\": time.strftime(\"%Y-%m-%d\", time.gmtime()),\n \"Headline\": \"\",\n \"Text\": \"\",\n \"Section\": \"\",\n \"Writers\": \"\",\n \"MainKeyWord\": \"\",\n \"AdditionalKeyWord\": \"\"\n }\n article = BeautifulSoup(text, \"lxml\").find(\"article\")\n if article is None:\n return self.www_default_com(text)\n result[\"Type\"] = (\n article.find(\"header\")\n .find(\"span\", {\"class\": \"article__category\"})\n .get_text(strip=True)\n )\n result[\"Headline\"] = article.find(\"h1\").get_text(strip=True)\n result[\"Text\"] = \"\".join([\n p.get_text(strip=True)\n for p in article.find_all(\"p\")\n ])\n result[\"MainKeyWord\"] = \",\".join([\n li.get_text(strip=True)\n for li in article.find(\"footer\").find(\"ul\").find_all(\"li\")\n ])\n\n return result\n\n
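# Usage sketch for the PageParser dispatch table above; the domain and HTML are
# made-up, and an unregistered domain falls back to www_default_com, which keeps
# only the <title> text and the concatenated <p> text (lxml must be installed).
def _pageparser_demo():
    demo_html = "<html><head><title>T</title></head><body><p>hello</p></body></html>"
    parser = PageParser()
    result = parser("www.example.com", demo_html)
    assert result["Headline"] == "T"
    assert result["Text"] == "hello"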
class GoogleNews(requests.Session):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.url_topstories = \"https://news.google.com/topstories\"\n self.pageparser = PageParser()\n\n def get(self, url, **kwargs):\n try:\n return super().get(url, **kwargs)\n except Exception as e:\n print(e)\n warn(\"Error in response: {}\".format(url))\n return None\n\n def get_article_urls(self, hl) -> list:\n \"\"\"Get the real URLs of all current articles\n\n Args:\n hl: country/region code, i.e. the hl parameter in the url, e.g. \"en-SG\", \"en-ID\", ...\n\n Returns:\n a list with the real url string of every article\n \"\"\"\n\n # fetch the start page\n response = self.get(self.url_topstories, params={\"hl\": hl})\n\n if not response:\n warn(\"No response, check the network\")\n return []\n sleep(0.1)\n # Path(\"a.html\").write_text(response.text, \"utf8\")\n\n # fetch the More Headlines page\n Path(\"./tmp/a.html\").write_text(response.text, encoding=\"utf8\")\n url_more_headlines = BeautifulSoup(response.text, \"lxml\").find(\n text=\"More Headlines\").parent.get(\"href\")\n response = self.get(urlparser.urljoin(\n response.url, url_more_headlines))\n if not response:\n warn(\"No response, check the network\")\n return []\n sleep(0.1)\n # Path(\"b.html\").write_text(response.text, \"utf8\")\n\n # collect Google's intermediate article links\n url_articles = [\n # the extra dot is needed here; for some reason the relative paths in the hrefs are off without it\n urlparser.urljoin(response.url, \".\"+e.a[\"href\"])\n for e in BeautifulSoup(response.text, \"lxml\").find_all(\"article\")\n ]\n\n # resolve the real article links\n url_real = []\n for url in tqdm(url_articles, desc=\"resolving real article links\", ncols=100):\n response = self.get(url)\n if not response:\n continue\n sleep(0.1)\n # Path(\"c.html\").write_text(response.text, \"utf8\")\n url_real.append(BeautifulSoup(\n response.text, \"lxml\").find(\"noscript\").a.get(\"href\"))\n\n return url_real\n\n def get_article(self, url_article) -> dict:\n \"\"\"Return the article data in structured form\"\"\"\n # XXX: if some urls here cannot be handled by requests, the domain could be checked\n # via urlparse and the page fetched with a headless browser instead.\n # The headless-browser part is not written yet; if many pages cannot be fetched with\n # requests, a table recording which domains need a headless browser may be needed too.\n response = self.get(url_article)\n sleep(0.1)\n\n # hand the page to the parser\n result = self.pageparser(\n urlparser.urlparse(url_article).netloc,\n (response.text if response else \"\")\n )\n result[\"URL\"] = url_article # fill in the two extra fields\n result[\"Source\"] = \"google\"\n return result\n\n\ndef get_domains(urls) -> set:\n \"\"\"Return the set of all distinct domains\"\"\"\n return 
set(urlparser.urlparse(url).netloc for url in urls)\n\n\nif __name__ == \"__main__\":\n spider = GoogleNews()\n spider.headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0\",\n }\n\n url_articles = spider.get_article_urls(\"en-SG\")\n pprint(url_articles)\n with Path(\"urls.json\").open(\"w\", encoding=\"utf8\") as f:\n json.dump(url_articles, f)\n\n text = spider.get_article(url_articles[0])\n with Path(\"example.json\").open(\"w\", encoding=\"utf8\") as f:\n json.dump(text, f, ensure_ascii=False)\n", "sub_path": "crawlers/scripts/urllib_crawlers/crawler_google.py", "file_name": "crawler_google.py", "file_ext": "py", "file_size_in_byte": 7364, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "warnings.warn", "line_number": 38, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 41, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 41, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 54, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 54, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 63, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 75, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 75, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 83, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 94, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 94, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 102, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 123, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 134, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 151, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 153, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 157, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 158, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 160, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 160, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 163, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 165, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 171, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 171, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 172, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 177, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 181, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 183, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 193, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 197, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 197, "usage_type": "name"}, {"api_name": "urllib.parse.urlparse", "line_number": 207, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 207, "usage_type": "name"}, {"api_name": "pprint.pprint", "line_number": 217, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 218, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 219, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 222, "usage_type": "call"}, {"api_name": 
"json.dump", "line_number": 223, "usage_type": "call"}]} {"seq_id": "62331019", "text": "import requests\nimport configparser\n\ncp = configparser.RawConfigParser()\ncp.read(r'config.cfg')\napikey = cp.get('main-config', 'apikey')\n\nlistFile = open('list.txt', 'r');\nfor line in listFile:\n\tline = line.strip()\n\tprint('Downloading', line)\n\tlr = None\n\ttry:\n\t\tlr = requests.head(line)\n\texcept:\n\t\tprint('Just URL check, ignore this')\n\tapilink = None\n\tif (lr != None and lr.status_code < 400):\n\t\tapilink = apilink = 'www.youtubeinmp3.com/fetch/?format=JSON&video=' + line\n\telse:\n\t\tytr = requests.get('https://www.googleapis.com/youtube/v3/search?part=snippet&q=' + line.replace(' ', '+') + '&key=' + apikey + '&maxesults=2')\n\t\tvid = ytr.json()['items'][0]['id']['videoId']\n\t\tvt = ytr.json()['items'][0]['snippet']['title']\n\t\tapilink = 'https://www.youtubeinmp3.com/fetch/?format=JSON&video=http://www.youtube.com/watch?v=' + vid\n\tyfr = requests.get(apilink)\n\tyfrj = yfr.json()\n\tt = yfrj['title']\n\tl = yfrj['length']\n\tdl = yfrj['link']\n\tsr = requests.get(dl)\n\tb = bytes(sr.content)\n\tprint('Downloaded', t, 'with length', l, 'and byte length', len(b))\n\tsf = open('dl\\\\' + t + '.mp3', 'wb')\n\tsf.write(b)\n\tprint('Downloaded song')\nprint('Download complete')", "sub_path": "YoutubeListDL/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1153, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "configparser.RawConfigParser", "line_number": 4, "usage_type": "call"}, {"api_name": "requests.head", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 25, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 30, "usage_type": "call"}]} {"seq_id": "407035035", "text": "import pickle\r\nimport pandas\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.impute import SimpleImputer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\ndata_set = pandas.read_csv('FTBorrower.csv')\r\nX = data_set.iloc[:, [5, 6, 7, 8, 9, 10]]\r\ny = data_set.iloc[:, [11]]\r\n\r\nX = pandas.DataFrame(SimpleImputer().fit_transform(X))\r\ny = pandas.DataFrame(SimpleImputer().fit_transform(y))\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.01, shuffle=False)\r\n\r\nscalar = StandardScaler().fit(X_train)\r\nprint(type(scalar))\r\n\r\nwith open('scalar.pkl', 'wb') as file:\r\n pickle.dump(scalar, file)\r\n\r\nwith open('scalar.pkl', 'rb') as file:\r\n nscalar = pickle.load(file)\r\n\r\nprint(type(nscalar))\r\n\r\nX_train = pandas.DataFrame(nscalar.transform(X_train))\r\nX_test = pandas.DataFrame(StandardScaler().fit_transform(X_test))\r\n\r\nclassifier = RandomForestClassifier(criterion='entropy').fit(X_train, y_train.values.ravel())\r\n\r\nwith open('model.pkl', 'wb') as file:\r\n pickle.dump(classifier, file)\r\n\r\nwith open('model.pkl', 'rb') as file:\r\n pickle_model = pickle.load(file)\r\n\r\nprint(X_test.shape[1])\r\n\r\ny_predict = pickle_model.predict(X_test)\r\n", "sub_path": "TrainCRA.py", "file_name": "TrainCRA.py", "file_ext": "py", "file_size_in_byte": 1236, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": 
"pandas.DataFrame", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.impute.SimpleImputer", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.impute.SimpleImputer", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 17, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 21, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 31, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 34, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 37, "usage_type": "call"}]} {"seq_id": "214608592", "text": "from multiprocessing.pool import Pool\n\nimport pandas as pd\n\nfrom datetime import timedelta\nimport numpy as np\n\nfrom tqdm import tqdm\n\nCLUSTER_SIZE = 20000\nDATASET = \"bip_assignment/dataset.csv\"\n\n\n\ndef compute_weather(key):\n dataset = pd.read_csv(\"bip_assignment/dataset_stations.csv\")\n\n weather_df = pd.read_csv(\"bip_assignment/weather_train.csv\")\n\n dataset['DATETIME_UTC'] = pd.to_datetime(dataset['DATETIME_UTC'])\n dataset['START_DATETIME_UTC'] = pd.to_datetime(dataset['START_DATETIME_UTC'])\n dataset['END_DATETIME_UTC'] = pd.to_datetime(dataset['END_DATETIME_UTC'])\n\n weather_df['DATETIME_UTC'] = pd.to_datetime(weather_df['DATETIME_UTC'])\n\n\n print(key)\n\n group = dataset[dataset.STATION_ID_2 == key]\n\n weather_relevant = weather_df[weather_df.ID == key]\n\n for i in tqdm(group.index):\n #line=group[group.index == i]\n\n weather_very_relevant = weather_relevant[(group.at[i,'DATETIME_UTC'] >= weather_relevant.DATETIME_UTC - timedelta(hours=12))\n & (group.at[i,'DATETIME_UTC'] <= weather_relevant.DATETIME_UTC + timedelta(hours=12))]\n weather_very_relevant[\"TIME_WEATHER_DELTA\"] = abs(weather_relevant.DATETIME_UTC - group.at[i,'DATETIME_UTC'])\n #print(weather_very_relevant.head())\n index = int(weather_very_relevant[[\"TIME_WEATHER_DELTA\"]].idxmin())\n #print(index)\n group.loc[i, 'TEMPERATURE'] = weather_very_relevant.at[index, \"TEMPERATURE\"]\n group.loc[i, 'MAX_TEMPERATURE'] = weather_very_relevant.at[index, \"MAX_TEMPERATURE\"]\n group.loc[i, 'MIN_TEMPERATURE'] = weather_very_relevant.at[index, \"MIN_TEMPERATURE\"]\n group.loc[i, 'WEATHER'] = weather_very_relevant.at[index, \"WEATHER\"]\n group.loc[i, 'DATETIME_UTC_WEATHER'] = weather_very_relevant.at[index, \"DATETIME_UTC\"]\n\n group.to_csv(\"dataset_station1_\"+key+\".csv\")\n\n\n\n\n\ndataset_total = pd.read_csv(\"bip_assignment/dataset_2.csv\")\n\nstations = set(dataset_total[\"STATION_ID_2\"])\n\nprint(len(set(dataset_total[\"STATION_ID_2\"])))\n\np = Pool(8)\np.map(compute_weather, tqdm(stations))\n\n", "sub_path": "parallel-attach-weather-2.py", "file_name": "parallel-attach-weather-2.py", "file_ext": "py", "file_size_in_byte": 2095, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_csv", 
"line_number": 18, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 24, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 54, "usage_type": "call"}, {"api_name": "multiprocessing.pool.Pool", "line_number": 60, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 61, "usage_type": "call"}]} {"seq_id": "234218100", "text": "# -*- coding: utf-8 -*-\r\nfrom unittest import mock\r\nimport unittest\r\n\r\nfrom bs4 import BeautifulSoup\r\n\r\nimport parsers\r\nfrom parsers.cte.redzinska import RedzinskaParser\r\nfrom parsers.vparsers import StatusParser\r\n\r\nfrom tests.parsers import ParserTestCaseMixin\r\n\r\n\r\n@mock.patch.object(parsers.cte.redzinska.SingleRequestLoaderMixin, \"load\")\r\nclass RedzinskaParserTest(ParserTestCaseMixin, unittest.TestCase):\r\n \r\n content = [\r\n BeautifulSoup(\r\n \"\"\"\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n
0/01l.pokoi: 3 cena: 294 264 PLN metraลผ: 53,6 mยฒ
ogrรณd: 83 mยฒ
zarezerwowane\t \r\n \r\n \"wiฤ™cej\"\r\n \r\n
0/02l.pokoi: 3 cena: 300 183 PLN metraลผ: 53,7 mยฒ
ogrรณd: 154 mยฒ
zarezerwowane\t \r\n \r\n \"wiฤ™cej\"\r\n \r\n
\r\n \"\"\", \"lxml\")\r\n ]\r\n content_empty = [ BeautifulSoup(\"\", \"lxml\") ]\r\n \r\n parser = RedzinskaParser\r\n records_count = 2\r\n test_record_index = 0\r\n test_record = {\r\n \"number\": \"0/01\",\r\n \"rooms\": 3,\r\n \"area\": 53.6,\r\n \"garden\": 83.0,\r\n \"balcony\": 0.0,\r\n \"price\": 294264.00,\r\n \"status\": StatusParser.RESERVED,\r\n \"plan\": \"http://cte-i.pl/pl/flat-info/356/121\",\r\n \"fid\": \"0/01\",\r\n \"floor\": 0\r\n }\r\n", "sub_path": "tests/parsers/test_redzinska.py", "file_name": "test_redzinska.py", "file_ext": "py", "file_size_in_byte": 2602, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "tests.parsers.ParserTestCaseMixin", "line_number": 15, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 15, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 18, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 56, "usage_type": "call"}, {"api_name": "parsers.cte.redzinska.RedzinskaParser", "line_number": 58, "usage_type": "name"}, {"api_name": "parsers.vparsers.StatusParser.RESERVED", "line_number": 68, "usage_type": "attribute"}, {"api_name": "parsers.vparsers.StatusParser", "line_number": 68, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 14, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 14, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 14, "usage_type": "name"}, {"api_name": "parsers.cte", "line_number": 14, "usage_type": "attribute"}]} {"seq_id": "30851240", "text": "import datetime\nimport json\n\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.template.loader import render_to_string\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.translation import ugettext as _\n\nfrom models import *\nimport forms\n\n\ndef home(request):\n places = Place.objects.all().order_by(\"?\")[:3]\n total_places_count = Place.objects.all().count()\n latest_photos = Photo.objects.filter().order_by(\"-when\")[:4]\n return render_to_response(\"app/home.html\",\n {\"featured_places\" : places,\n \"latest_photos\" : latest_photos, \n \"total_places_count\" : total_places_count, },\n context_instance=RequestContext(request))\n \n \ndef invitation_request(request):\n message = None\n if request.method == \"POST\":\n form = forms.InvitationRequestForm(request.POST)\n if form.is_valid():\n form.save()\n message = _(\"Thanks for signing up!\")\n else:\n form = forms.InvitationRequestForm()\n return render_to_response(\"app/invitation_request.html\",\n {\"form\" : form, \n \"message\" : message, },\n context_instance=RequestContext(request)) \n\n\ndef sign_up(request, invitation_key):\n error_message = None\n \n invitation = get_object_or_404(Invitation, key=invitation_key)\n \n if request.method == \"POST\":\n signup_form = forms.SignupForm(request.POST)\n if signup_form.is_valid():\n \n if User.objects.filter(username=signup_form.cleaned_data[\"username\"]).count() > 0:\n error_message = _(\"Username is already taken\")\n else:\n if User.objects.filter(email=signup_form.cleaned_data[\"email\"]).count() > 0:\n error_message = _(\"Email is 
already registered\")\n else:\n new_user = User.objects.create_user(signup_form.cleaned_data[\"username\"],\n signup_form.cleaned_data[\"email\"],\n signup_form.cleaned_data[\"new_password\"])\n invitation.used = datetime.datetime.now()\n invitation.used_by_user = new_user\n invitation.save()\n user = authenticate(username=signup_form.cleaned_data[\"username\"],\n password=signup_form.cleaned_data[\"new_password\"])\n login(request, user)\n return HttpResponseRedirect(reverse(\"app.views.user\", args=[user.username]))\n else:\n signup_form = forms.SignupForm(initial={\"email\" : invitation.email, })\n \n return render_to_response(\"app/sign_up.html\",\n {\"signup_form\" : signup_form, \n \"error_message\" : error_message},\n context_instance=RequestContext(request))\n\n\ndef sign_in(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n pass\n else:\n # Account disabled\n pass\n else:\n pass\n # Invalid password\n return HttpResponseRedirect(reverse(\"app.views.home\"))\n\n\ndef sign_out(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"app.views.home\"))\n\n\ndef list(request, show_username=None):\n if show_username is not None:\n show_user = get_object_or_404(User, username=show_username)\n else:\n show_user = None\n \n places = Place.objects.all()\n return render_to_response(\"app/list_places.html\",\n {\"places\" : places,\n \"show_user\" : show_user, },\n context_instance=RequestContext(request))\n\n\ndef list_ajax(request, show_username=None):\n if show_username is not None:\n show_user = get_object_or_404(User, username=show_username)\n else:\n show_user = None\n \n places = Place.objects.all()\n \n result = []\n for place in places:\n result.append({\"place_id\" : place.id,\n \"name\" : place.name,\n \"small_card\" : render_to_string(\"app/snippet_small_card.html\", {\"place\" : place, \"show_user\" : show_user,}),\n \"location_lat\" : place.location_lat,\n \"location_lon\" : place.location_lon,\n })\n \n return HttpResponse(json.dumps(result), mimetype=\"application/json\")\n\n\ndef map(request, show_username=None):\n if show_username is not None:\n show_user = get_object_or_404(User, username=show_username)\n else:\n show_user = None\n \n places = Place.objects.all()\n return render_to_response(\"app/map.html\",\n {\"places\" : places,\n \"show_user\" : show_user, \n \"places_ajax_list_url\" : reverse(\"app.views.list_ajax\", kwargs=[]),\n },\n context_instance=RequestContext(request))\n\n\ndef place(request, id):\n place = get_object_or_404(Place, id=id)\n sections = Section.objects.filter(place=place).order_by(\"section\")\n return render_to_response(\"app/place.html\",\n {\"place\" : place,\n \"sections\" : sections,\n \"upload_form\" : forms.PhotoForm(),},\n context_instance=RequestContext(request))\n \n\n@login_required \ndef upload(request, place_id):\n place = get_object_or_404(Place, id=place_id)\n if request.method == \"POST\":\n form = forms.PhotoForm(request.POST, request.FILES)\n if form.is_valid():\n photo = form.save(commit=False)\n photo.place = place\n photo.taken_by = request.user\n photo.save()\n return HttpResponseRedirect(reverse(\"app.views.place\", args=[place_id]))\n else:\n form = forms.PhotoForm()\n return render_to_response(\"app/upload.html\",\n {\"place\" : place,\n \"form\" : form},\n context_instance=RequestContext(request))\n\ndef user(request, username):\n show_user = 
get_object_or_404(User, username=username)\n latest_photos = Photo.objects.filter(taken_by=show_user).order_by(\"-when\")[:4]\n places_visited = Place.objects.filter(photo__taken_by=show_user).distinct().count()\n total_places_count = Place.objects.all().count()\n\n edit_form = None\n hide_edit = True\n error_message = None\n \n if request.user == show_user:\n profile = show_user.get_profile()\n if request.method == \"POST\":\n edit_form = forms.EditUserForm(request.POST)\n if edit_form.is_valid():\n if User.objects.filter(email=edit_form.cleaned_data[\"email\"]).exclude(id=show_user.id).count() == 0:\n \n show_user.email = edit_form.cleaned_data[\"email\"]\n show_user.save()\n profile.title = edit_form.cleaned_data[\"title\"]\n profile.website = edit_form.cleaned_data[\"website\"]\n profile.bio = edit_form.cleaned_data[\"bio\"]\n profile.let_others_see_email = edit_form.cleaned_data[\"let_others_see_email\"]\n profile.organization = edit_form.cleaned_data[\"organization\"]\n profile.organization_website = edit_form.cleaned_data[\"organization_website\"]\n profile.save()\n else:\n error_message = _(\"Email is already registered\")\n hide_edit = False\n else:\n hide_edit = False\n else:\n edit_form = forms.EditUserForm(initial={\"email\" : show_user.email,\n \"let_others_see_email\" : profile.let_others_see_email,\n \"title\" : profile.title,\n \"website\" : profile.website,\n \"bio\" : profile.bio,\n \"organization\" : profile.organization,\n \"organization_website\" : profile.organization_website})\n \n return render_to_response(\"app/user.html\",\n {\"show_user\" : show_user,\n \"latest_photos\" : latest_photos,\n \"total_places_count\" : total_places_count,\n \"places_visited\" : places_visited,\n \"edit_form\" : edit_form, \n \"hide_edit\" : hide_edit, \n \"error_message\" : error_message, },\n context_instance=RequestContext(request))\n\n\ndef help(request, page):\n return render_to_response(\"app/help/%s.html\" % page,\n {},\n context_instance=RequestContext(request))\n", "sub_path": "app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 9688, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.shortcuts.render_to_response", "line_number": 22, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 26, "usage_type": "call"}, {"api_name": "forms.InvitationRequestForm", "line_number": 32, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 35, "usage_type": "call"}, {"api_name": "forms.InvitationRequestForm", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 38, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 41, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 47, "usage_type": "call"}, {"api_name": "forms.SignupForm", "line_number": 50, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 53, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 53, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 54, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 56, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.models.User.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 56, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 57, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 59, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 59, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 65, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 67, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 68, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 68, "usage_type": "call"}, {"api_name": "forms.SignupForm", "line_number": 70, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 72, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 75, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 81, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 84, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 92, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 92, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 96, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 97, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 97, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 102, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 102, "usage_type": "argument"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 107, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 110, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 115, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 115, "usage_type": "argument"}, {"api_name": "django.template.loader.render_to_string", "line_number": 125, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 130, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 130, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 135, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 135, "usage_type": "argument"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 140, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 143, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 145, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 149, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 151, "usage_type": "call"}, {"api_name": "forms.PhotoForm", "line_number": 154, "usage_type": "call"}, {"api_name": 
"django.template.RequestContext", "line_number": 155, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 160, "usage_type": "call"}, {"api_name": "forms.PhotoForm", "line_number": 162, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 168, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 168, "usage_type": "call"}, {"api_name": "forms.PhotoForm", "line_number": 170, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 171, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 174, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 158, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 177, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 177, "usage_type": "argument"}, {"api_name": "forms.EditUserForm", "line_number": 189, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 191, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 191, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 191, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 203, "usage_type": "call"}, {"api_name": "forms.EditUserForm", "line_number": 208, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 216, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 224, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 228, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 230, "usage_type": "call"}]} {"seq_id": "629202016", "text": "###A python code to extract the indices of a cluster which the diffusion of the particle happens within\n###and calculate the radius of gyration of this very cluster\n\nimport multiprocessing as mp\t\t\t\t#Import necessary libraries\nimport numpy as np\nfrom os import chdir\n\n#filepath = '/home/yousof/Percolation/Latt_1000/'\t#Path to the root directiory of files\nfilepath = input('Please give the directory path: ')\nchdir(filepath)\t\t\t\t\t\t#Change to the working directory\n\nZ = np.loadtxt('initial_pos')\t\t\t\t#Load the file that contains initial positions of the traces in different simulations\nN = 3006\t\t\t\t\t\t#Number of simulations\nCPU_n = 6\t\t\t\t\t\t#Number of CPUs to be used for the parallel computing\nindex_list = range(0,N)\t\t\t\t\t#Index of the files which its range is the number of simulations\n\n\n#Defining a function which extract the indices of the percolation cluster\ndef percolation_culster_identifier(k):\n\tinputfile = 'label_%d' %(k)\t\t\t#The input file is a file contains inforamtion about the labels of different cluster\n\toutput = 'indices_%d' %(k)\t\t\t#Naming the output file, it simply contains information about the indices of the cluster\n\tchdir(filepath+'labelboard')\t\t\t#Change directory to the one contains the labelling boards\n\tX = np.loadtxt(inputfile)\t\t\t#Load the labelled board\n\tchdir(filepath+'percolation_cluster_indices')\t#Change the directory to the one where the output files are stored\n\tindex_file = open(output, 'w')\t\t\t#Open a stream file for the purpose of writing on output files\n\n\tfor i in range(0,len(X)):\t\t\t#A for loop 
#Defining a function which calculates the radius of gyration of the cluster in which the diffusion process happens\ndef gyration(filename):\n\tchdir(filepath+'percolation_cluster_indices')\t#Change the working directory to the one where the indices of the cluster are stored\n\tK = np.loadtxt(filename)\t\t\t#Load the file that contains the indices for the different simulation instances\n\tK = K.reshape(np.prod(K.shape) // 2, 2)\t\t#Reshape the array to Nx2, where N is the size of the cluster\n\tprint(filename)\n\tR_2_g = 0\t\t\t\t\t#Initiating a variable which stores the square of the radius of gyration\n\t#num_iter = 0\t\t\t\t\t#Initiating a variable which counts the number of iterations\n\tx_cm = np.sum(K[:,0])/len(K)\t\t\t#Centre of mass along the x-axis\n\ty_cm = np.sum(K[:,1])/len(K)\t\t\t#Centre of mass along the y-axis\n\tfor i in range(0, len(K)):\t\t\t#A for loop to calculate the radius of gyration\n\t\tR_2_g = R_2_g + ( (K[i,0] - x_cm)**2 + (K[i, 1] - y_cm)**2 )\n\t\tif (i%1000 == 0):\t\t\t#Just to show activity, print the filename and the iteration the function is working on at the moment\n\t\t\tprint(filename + '\\t' + str(i))\n\tR_g = np.sqrt(1.0/len(K)*R_2_g)\t\t\t#Calculate the square root of R_2_g\n\treturn filename + '\\t' + str(R_g)\t\t#The function returns the filename (for the purpose of sorting) and the radius of gyration\n\nfiles = []\nfor k in range(0,N):\n\tfiles.append('indices_%d' %(k))\nprint(files)\n\npool = mp.Pool(processes=CPU_n)\t\t\t\t#Call the pool method to set the number of processors\npool.map(percolation_cluster_identifier, index_list)\t#Call the map method to perform a parallel computation\nresults = pool.map(gyration, files)\t\t\t#Gather the results from different CPUs and store them in variable 'results'\n\nchdir(filepath)\t\t\t\t\t\t#Change the directory to the root working directory\noutput = open('GyrationRad','w')\t\t\t#Open a stream output file to store the radii of gyration\nfor i in results:\n\toutput.write(str(i) + '\\n')\t\t\t#Write the radius of gyration on the file\noutput.close()\t\t\t\t\t\t#Close the output file\n", "sub_path": "python_codes/Gyration_R.py", "file_name": "Gyration_R.py", "file_ext": "py", "file_size_in_byte": 3881, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "os.chdir", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 12, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 23, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.product", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 48, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 56, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 60, "usage_type": "call"}]}
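# Illustrative aside on the radius-of-gyration loop in Gyration_R.py above:
# with the cluster indices in an (N, 2) array K, the same quantity is a one-liner,
# R_g = sqrt(mean(|r_i - r_cm|^2)). The array below is synthetic.
import numpy as np

K = np.array([[0.0, 0.0], [2.0, 0.0], [0.0, 2.0], [2.0, 2.0]])
r_cm = K.mean(axis=0)                                # centre of mass, here (1, 1)
R_g = np.sqrt(((K - r_cm) ** 2).sum(axis=1).mean())
assert np.isclose(R_g, np.sqrt(2.0))                 # each point is sqrt(2) from r_cm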
"call"}, {"api_name": "os.chdir", "line_number": 60, "usage_type": "call"}]} {"seq_id": "190054285", "text": "#!/usr/bin/python3\nimport socket, threading, json, requests\n\n# Lista de usuarios online\nonline = []\n\n# Lista de mensagens recebidas\nmensagens = []\n\n# Funรงao que recebe as mensagens, imprime-as e envia de volta ao cliente um dicionario contendo o tipo da mensagem (flag) cliente que enviou a mensagem como chave e a sua mensagem como valor associado.\ndef Receber_Retornar(con, cli):\n # Variavel para armazenar o nome do cliente para ser usado depois que ele eh desconectado\n nome = \"\"\n while True:\n recebido = con.recv(1024)\n if not recebido: break\n recebido = recebido.decode(\"utf-8\")\n # Ex.: recebido = {\"flag\":\"MSG\", \"nome\":\"fulano\", \"msg\":\"ola, tudo bem?\"}\n recebido = json.loads(recebido)\n # Se o cliente enviar SAIR ele envia de volta para fechar a thread de receber dados\n if recebido[\"msg\"] == \"SAIR\":\n nome = recebido[\"nome\"]\n enviando = json.dumps([recebido])\n enviando = enviando.encode(\"utf-8\")\n con.send(enviando)\n else:\n # No inicio da aplicaรงao cliente ele envia a mensagem com flag = REG para ser printada a nova conexao\n if recebido[\"flag\"] == \"REG\":\n print(\"REG - Nova Conexao: {}-{}\".format(cli, recebido[\"nome\"]))\n for conexao in online:\n enviando = json.dumps(mensagens)\n enviando = enviando.encode(\"utf-8\")\n conexao.send(enviando)\n else:\n # Caso a flag seja igual a MSG armazena a mensagem na lista, printa a mensagem e envia a lista com todas as mensagens atuais para todas as conexoes online\n mensagens.append(recebido)\n print(\"MSG - Recebido {}-{}: {}\".format(cli,recebido[\"nome\"], recebido[\"msg\"]))\n for conexao in online:\n enviando = json.dumps(mensagens)\n enviando = enviando.encode(\"utf-8\")\n conexao.send(enviando)\n # Em caso do cliente desconectar-se sua conexao eh removida da lista de usuarios online\n # e eh printado que o tal cliente se desconectou, logo em seguida encerra-se a conexao\n online.remove(con)\n print(\"REG - Desconectado: {}-{}\".format(cli, nome))\n con.close()\n\n# Funรงao que define o socket do servidor adicionando novas conexoes a lista de usuarios online e executando a thread de receber e retornar\ndef Sock():\n host = \"127.0.0.1\"\n port = 50000\n\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((host, port))\n s.listen(5)\n print(\"Servidor Ligado!\")\n while True:\n con, cli = s.accept()\n online.append(con)\n threading.Thread(target=Receber_Retornar, args=(con, cli)).start()\n\n# Chamada a funรงao principal (Sock)\nSock()\n", "sub_path": "Prog-Redes/Prova1/servidor.py", "file_name": "servidor.py", "file_ext": "py", "file_size_in_byte": 2821, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "json.loads", "line_number": 19, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 23, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 31, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 39, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 53, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 53, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 53, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 60, "usage_type": "call"}]} {"seq_id": "515263608", "text": "import logging\n\nfrom bentoml.yatai.proto.repository_pb2 import BentoUri\nfrom 
e2e_tests.cli_operations import delete_bento\nfrom e2e_tests.sample_bento_service import SampleBentoService\nfrom e2e_tests.yatai_server.utils import (\n    get_bento_service_info,\n    execute_bentoml_run_command,\n    local_yatai_server,\n)\n\nlogger = logging.getLogger('bentoml.test')\n\n\ndef test_yatai_server_with_postgres_and_gcs(postgres_db_container_url):\n\n    gcs_bucket_name = 'gs://bentoml-e2e-tests/'\n\n    with local_yatai_server(\n        db_url=postgres_db_container_url, repo_base_url=gcs_bucket_name\n    ):\n        logger.info('Saving bento service')\n        svc = SampleBentoService()\n        svc.save()\n        bento_tag = f'{svc.name}:{svc.version}'\n        logger.info('BentoService saved')\n\n        logger.info(\"Display bentoservice info\")\n        bento = get_bento_service_info(svc.name, svc.version)\n        logger.info(bento)\n        assert (\n            bento.uri.type == BentoUri.GCS\n        ), 'BentoService storage type mismatched, expect GCS'\n\n        logger.info('Validate BentoService prediction result')\n        run_result = execute_bentoml_run_command(bento_tag, '[]')\n        assert 'cat' in run_result, 'Unexpected BentoService prediction result'\n\n        logger.info(f'Deleting saved bundle {bento_tag}')\n        delete_svc_result = delete_bento(bento_tag)\n        assert f\"{bento_tag} deleted\" in delete_svc_result\n", "sub_path": "e2e_tests/yatai_server/test_postgres_gcs.py", "file_name": "test_postgres_gcs.py", "file_ext": "py", "file_size_in_byte": 1419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "e2e_tests.yatai_server.utils.local_yatai_server", "line_number": 19, "usage_type": "call"}, {"api_name": "e2e_tests.sample_bento_service.SampleBentoService", "line_number": 23, "usage_type": "call"}, {"api_name": "e2e_tests.yatai_server.utils.get_bento_service_info", "line_number": 29, "usage_type": "call"}, {"api_name": "bentoml.yatai.proto.repository_pb2.BentoUri.GCS", "line_number": 32, "usage_type": "attribute"}, {"api_name": "bentoml.yatai.proto.repository_pb2.BentoUri", "line_number": 32, "usage_type": "name"}, {"api_name": "e2e_tests.yatai_server.utils.execute_bentoml_run_command", "line_number": 36, "usage_type": "call"}, {"api_name": "e2e_tests.cli_operations.delete_bento", "line_number": 40, "usage_type": "call"}]} {"seq_id": "446130911", "text": "from numba import cuda\nimport numpy as np\nimport math\nimport time\n\n'''\nDistance Matrix Calculation Accelerated by GPGPU\n'''\n\n@cuda.jit\ndef dist_matrix_kernel(vectors, sizes, dist_matrix):\n    ## Input 1: the list of vectors\n    ## Input 2: the size of each vector\n    ## Output : distance matrix\n    i = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x\n    j = cuda.threadIdx.y + cuda.blockIdx.y * cuda.blockDim.y\n\n    vsize = sizes[0]\n    isize = sizes[1]\n\n    if i >= isize or j >= isize:\n        return\n    if i == j:\n        dist_matrix[i, j] = 0\n        return\n    else:\n        dist = 0\n        for idx in range(vsize):\n            dist += (vectors[i, idx] - vectors[j, idx]) ** 2\n        dist_matrix[i, j] = math.sqrt(dist)\n    \n## GPU Accelerated Distance matrix calculation\ndef dist_matrix_gpu(vectors):\n    vector_size = len(vectors[0])\n    item_num = len(vectors)\n\n    ## INPUT\n    vectors_global_mem = cuda.to_device(vectors)\n    sizes_global_mem = cuda.to_device(np.array([vector_size, item_num]))\n\n    ## OUTPUT\n    dist_matrix_global_mem = cuda.device_array((item_num, item_num))\n    \n    ## Run Kernel\n    TPB = 16\n    tpb = (TPB, TPB) \n    bpg = (math.ceil(item_num / TPB), math.ceil(item_num / TPB))\n\n    dist_matrix_kernel[bpg, tpb](vectors_global_mem, 
sizes_global_mem, dist_matrix_global_mem)\n    \n    ## GET Result\n    dist_matrix = dist_matrix_global_mem.copy_to_host()\n\n    return dist_matrix\n\n## CPU Distance matrix calculation (for comparison & test)\ndef dist_matrix_cpu(vectors):\n    vector_size = len(vectors[0])\n    item_num = len(vectors)\n\n    dist_matrix = np.zeros((item_num, item_num))\n\n    for i in range(item_num):\n        for j in range(i):\n            dist_matrix[i, j] = np.linalg.norm(vectors[i] - vectors[j])\n            dist_matrix[j, i] = dist_matrix[i, j]\n\n    return dist_matrix\n\n'''\nk-nearest neighbor implementation with \nthe further acceleration achieved by preprocessed distance matrix\n'''\n\n", "sub_path": "src/snc/helpers/distance_matrix.py", "file_name": "distance_matrix.py", "file_ext": "py", "file_size_in_byte": 1953, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "numba.cuda.threadIdx", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numba.cuda", "line_number": 15, "usage_type": "name"}, {"api_name": "numba.cuda.blockIdx", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numba.cuda.blockDim", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numba.cuda.threadIdx", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numba.cuda", "line_number": 16, "usage_type": "name"}, {"api_name": "numba.cuda.blockIdx", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numba.cuda.blockDim", "line_number": 16, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 30, "usage_type": "call"}, {"api_name": "numba.cuda.jit", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numba.cuda", "line_number": 10, "usage_type": "name"}, {"api_name": "numba.cuda.to_device", "line_number": 38, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 38, "usage_type": "name"}, {"api_name": "numba.cuda.to_device", "line_number": 39, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numba.cuda.device_array", "line_number": 42, "usage_type": "call"}, {"api_name": "numba.cuda", "line_number": 42, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 65, "usage_type": "attribute"}]} {"seq_id": "495485625", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n@author: Daniel Schreij\n\nThis module is distributed under the Apache v2.0 License.\nYou should have received a copy of the Apache v2.0 License\nalong with this module. 
If not, see .\n\"\"\"\n# Python3 compatibility\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom QOpenScienceFramework.compat import *\nfrom QOpenScienceFramework.manager import ConnectionManager\nfrom QOpenScienceFramework import connection as osf\nfrom QOpenScienceFramework import widgets, events\nfrom qtpy import QtWidgets, QtCore\nfrom dotenv import load_dotenv\n\n# Import basics\nimport sys\nimport os\nimport logging\nimport tempfile\nlogging.basicConfig(level=logging.INFO)\nload_dotenv()\n\nos.environ[\"QT_AUTO_SCREEN_SCALE_FACTOR\"] = \"1\"\n\n# Required QT classes\n# Widgets\n# Event dispatcher and listeners\n\n# CONFIGURE THE CLIENT ID AND REDIRECT URI HERE. REGISTER AT OSF.IO\n# You can set the parameters here, or place them as environment variables in a .env file\nclient_id = os.getenv(\"OSF_CLIENT_ID\", \"\")\nredirect_uri = os.getenv(\"OSF_REDIRECT_URI\", \"\")\n\n\nclass InvalidateButton(QtWidgets.QWidget):\n \"\"\" Just a button to tamper with the OSF session and see what the app does\n to recover from missing authentication information \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(InvalidateButton, self).__init__(*args, **kwargs)\n self.setLayout(QtWidgets.QHBoxLayout())\n pb = QtWidgets.QPushButton(\"Invalidate session\")\n pb.clicked.connect(self.invalidate_session)\n self.layout().addWidget(pb)\n\n def invalidate_session(self):\n print(\"Invalidating session!\")\n osf.session = osf.create_session()\n print(osf.session.token)\n\n\nclass StandAlone(object):\n \"\"\" Class that opens all available widgets when instantiated for testing\n purposes. \"\"\"\n\n def __init__(self):\n # Check if client_id and redirect_uri have been changed\n if client_id == \"\":\n raise RuntimeError(\"Please insert the client_id you have registered\"\n \" for your app at the OSF\")\n if redirect_uri == \"\":\n raise RuntimeError(\"Please insert the redirect uri you have registered\"\n \" for your app at the OSF\")\n\n # Set OSF server settings\n server_settings = {\n \"client_id\"\t\t: client_id,\n \"redirect_uri\"\t: redirect_uri,\n }\n # Add these settings to the general settings\n osf.settings.update(server_settings)\n osf.create_session()\n\n tmp_dir = safe_decode(tempfile.gettempdir())\n tokenfile = os.path.join(tmp_dir, u\"osf_token.json\")\n # Create manager object\n self.manager = ConnectionManager(tokenfile=tokenfile)\n\n # Init and set up user badge\n self.user_badge = widgets.UserBadge(self.manager)\n self.user_badge.move(850, 100)\n\n # Set-up project tree\n project_tree = widgets.ProjectTree(self.manager, use_theme=\"Faenza\")\n\n # Init and set up Project explorer\n self.project_explorer = widgets.OSFExplorer(\n self.manager, tree_widget=project_tree\n )\n self.project_explorer.move(50, 100)\n\n # Token file listener writes the token to a json file if it receives\n # a logged_in event and removes this file after logout\n # Filename of the file to store token information in.\n self.tfl = events.TokenFileListener(tokenfile)\n\n self.manager.dispatcher.add_listeners(\n [\n self.manager, self.tfl, project_tree,\n self.user_badge, self.project_explorer\n ]\n )\n # Connect click on user badge logout button to osf logout action\n self.user_badge.logout_request.connect(self.manager.logout)\n self.user_badge.login_request.connect(self.manager.login)\n\n # self.ib = InvalidateButton()\n # self.ib.setGeometry(850,200,200,50)\n # self.ib.show()\n\n # If a valid token is 
stored in token.json, use that.\n # Otherwise show the login window.\n self.manager.login()\n # Show the user badge\n self.user_badge.show()\n self.project_explorer.show()\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n\n print(\"Using Qt {}\".format(QtCore.PYQT_VERSION_STR))\n\n # Enable High DPI display with PyQt5\n if hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):\n app.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)\n if hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):\n app.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)\n\n test = StandAlone()\n exitcode = app.exec_()\n logging.info(\"App exiting with code {}\".format(exitcode))\n sys.exit(exitcode)\n", "sub_path": "example.py", "file_name": "example.py", "file_ext": "py", "file_size_in_byte": 4925, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.basicConfig", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 28, "usage_type": "attribute"}, {"api_name": "dotenv.load_dotenv", "line_number": 29, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 39, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 40, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QWidget", "line_number": 43, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 43, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QHBoxLayout", "line_number": 49, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 49, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QPushButton", "line_number": 50, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 50, "usage_type": "name"}, {"api_name": "QOpenScienceFramework.connection.session", "line_number": 56, "usage_type": "attribute"}, {"api_name": "QOpenScienceFramework.connection", "line_number": 56, "usage_type": "name"}, {"api_name": "QOpenScienceFramework.connection.create_session", "line_number": 56, "usage_type": "call"}, {"api_name": "QOpenScienceFramework.connection.session", "line_number": 57, "usage_type": "attribute"}, {"api_name": "QOpenScienceFramework.connection", "line_number": 57, "usage_type": "name"}, {"api_name": "QOpenScienceFramework.connection.settings.update", "line_number": 79, "usage_type": "call"}, {"api_name": "QOpenScienceFramework.connection.settings", "line_number": 79, "usage_type": "attribute"}, {"api_name": "QOpenScienceFramework.connection", "line_number": 79, "usage_type": "name"}, {"api_name": "QOpenScienceFramework.connection.create_session", "line_number": 80, "usage_type": "call"}, {"api_name": "QOpenScienceFramework.connection", "line_number": 80, "usage_type": "name"}, {"api_name": "tempfile.gettempdir", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "QOpenScienceFramework.manager.ConnectionManager", "line_number": 85, "usage_type": "call"}, {"api_name": "QOpenScienceFramework.widgets.UserBadge", "line_number": 88, "usage_type": "call"}, {"api_name": "QOpenScienceFramework.widgets", "line_number": 88, "usage_type": "name"}, {"api_name": "QOpenScienceFramework.widgets.ProjectTree", "line_number": 92, "usage_type": "call"}, {"api_name": "QOpenScienceFramework.widgets", "line_number": 92, "usage_type": "name"}, {"api_name": 
"QOpenScienceFramework.widgets.OSFExplorer", "line_number": 95, "usage_type": "call"}, {"api_name": "QOpenScienceFramework.widgets", "line_number": 95, "usage_type": "name"}, {"api_name": "QOpenScienceFramework.events.TokenFileListener", "line_number": 103, "usage_type": "call"}, {"api_name": "QOpenScienceFramework.events", "line_number": 103, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QApplication", "line_number": 128, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 128, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 128, "usage_type": "attribute"}, {"api_name": "qtpy.QtCore.PYQT_VERSION_STR", "line_number": 130, "usage_type": "attribute"}, {"api_name": "qtpy.QtCore", "line_number": 130, "usage_type": "name"}, {"api_name": "qtpy.QtCore.Qt", "line_number": 133, "usage_type": "attribute"}, {"api_name": "qtpy.QtCore", "line_number": 133, "usage_type": "name"}, {"api_name": "qtpy.QtCore.Qt", "line_number": 134, "usage_type": "attribute"}, {"api_name": "qtpy.QtCore", "line_number": 134, "usage_type": "name"}, {"api_name": "qtpy.QtCore.Qt", "line_number": 135, "usage_type": "attribute"}, {"api_name": "qtpy.QtCore", "line_number": 135, "usage_type": "name"}, {"api_name": "qtpy.QtCore.Qt", "line_number": 136, "usage_type": "attribute"}, {"api_name": "qtpy.QtCore", "line_number": 136, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 140, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 141, "usage_type": "call"}]} {"seq_id": "293214688", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('promotion', '0001_initial'),\n ('shop', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='promotionitem',\n name='item',\n field=models.ForeignKey(verbose_name='\\u0422\\u043e\\u0432\\u0430\\u0440', to='shop.Product'),\n ),\n migrations.AddField(\n model_name='promotionitem',\n name='promotion',\n field=models.ForeignKey(verbose_name='\\u0410\\u043a\\u0446\\u0438\\u044f', to='promotion.Promotion'),\n ),\n ]\n", "sub_path": "promotion/migrations/0002_auto_20150918_2045.py", "file_name": "0002_auto_20150918_2045.py", "file_ext": "py", "file_size_in_byte": 702, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}]} {"seq_id": "171689874", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 2 13:13:38 2018\r\n\r\n@author: htyagi\r\n\"\"\"\r\n\r\ndef readCSV(location = r'D:\\Users\\htyagi\\Desktop\\conda\\NSE-INFY.csv'):\r\n import datetime,re\r\n data = []\r\n with open(location, 'r') as infyRowsData: 
\r\n        for row in infyRowsData:\r\n            row = row.strip()\r\n            column = row.split(',')\r\n            column = list(map(lambda x:x.strip(), column))\r\n            match = re.search('Date', column[0])\r\n            if match or column[0]=='':\r\n                continue\r\n            else:\r\n                datex = datetime.datetime.strptime(column[0], '%m/%d/%Y')\r\n                column[0] = datetime.datetime.strftime(datex, '%Y-%m-%d')\r\n                data.append(tuple(column))\r\n    return data\r\n    \r\n    \r\n    \r\nif __name__ == '__main__':\r\n    \r\n    print(readCSV(r'D:\\Users\\htyagi\\Desktop\\conda\\NSE-INFY.csv'))", "sub_path": "readCSV.py", "file_name": "readCSV.py", "file_ext": "py", "file_size_in_byte": 894, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "re.search", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strftime", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "attribute"}]} {"seq_id": "250501244", "text": "##CTRL+SHIFT+B to run\n\n##VARIABLES\na=2\nb=4\nprint(a*b)\n\nmyString = \"Hello World\"\n\nanotherString = \", my name is \"\nmyName = \"Amber!\"\n\nfullString = myString + anotherString + myName\n\nprint(fullString)\n\n##LISTS\nmyList = [1,2,3]\nanotherList = ['dog','cat','panda']\n\nmyList[0]\nmyList[1]\n\nmyNewString = \"I have \" + str(myList[0]) + \" \" + anotherList[2]\nprint(myNewString)\n\nmyList.append(4)\n\nmyNumbers = [8,9,10]\nmyList.extend(myNumbers)\nprint(myList)\n\n##LOOPS\nnewList = []\n\nfor x in myList:\n    newList.append(x*2)\nprint(newList)\n\n##FUNCTIONS\nname = \"Amber\"\nage = 18\n\ndef NameAdder(x,y):\n    nameAgeString = \"Hi my name is {} and I'm {}\".format(x,str(y))\n    return nameAgeString\n\nmySentence = NameAdder(name,age)\nprint(mySentence)\n\n##READING DATA FROM A CSV\nimport pandas as pd\n\nmy_file = pd.read_csv(\"AAPL.csv\")\n\ndf = pd.DataFrame(my_file)\ndf.head()\ndf['close']\nd = df['close']\nd[:5]\nd[0:50:2]\ncol_open = df['open']\n\nimport numpy as np\nnp.mean(col_open)\n\nts = pd.Series(np.random.randn(1000), index = pd.date_range('1/23/2019', periods = 1000))\n\n##Plotting the data\nimport matplotlib.pyplot as pyplot\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n\nts = ts.cumsum()\npyplot.plot(ts)\npyplot.show()\n", "sub_path": "Turing Club/turingClubWorkshopJan21.py", "file_name": "turingClubWorkshopJan21.py", "file_ext": "py", "file_size_in_byte": 1236, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pandas.read_csv", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pandas.date_range", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.plotting.register_matplotlib_converters", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", 
"line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}]} {"seq_id": "103750496", "text": "import pyttsx3\r\nimport tkinter as tk\r\nimport tkinter.font as font\r\nfrom tkinter.constants import CENTER, COMMAND, DISABLED, LEFT, NORMAL, VERTICAL, W\r\nfrom tkinter.ttk import Progressbar\r\nfrom typing import Text\r\nfrom pygame import mixer\r\n\r\n#set male voice for calling\r\nengine = pyttsx3.init()\r\nvoices = engine.getProperty('voices') \r\nengine.setProperty('voices', voices[1].id)\r\n\r\nmixer.init()\r\n#music playing\r\nmixer.music.load('kbc.mp3')\r\nmixer.music.play(-1)\r\n\r\n\r\n\r\n#creating tkinter window\r\nroot = tk.Tk()\r\nroot.title(\"Millionaire\")\r\n# tkinter window with dimensions 1270x652 at x =0, y=0\r\nroot.geometry('1270x652+0+0')\r\nroot.configure(background='black')\r\n#--------------------------------------------------frames------------------------\r\nleftFrame = tk.Frame(root, bg='black', padx = 90)\r\nleftFrame.grid(row=0, column=0)\r\n\r\ntopFrame = tk.Frame(leftFrame, bg='black', pady = 15)\r\ntopFrame.grid()\r\n\r\ncenterFrame = tk.Frame(leftFrame, bg='black', pady = 15)\r\ncenterFrame.grid(row=1, column=0)\r\n\r\nbottomFrame = tk.Frame(leftFrame)\r\nbottomFrame.grid(row=2, column=0)\r\n\r\nrightFrame = tk.Frame(root, padx = 50, pady = 25, bg='black')\r\nrightFrame.grid(row=0, column=1)\r\n#--------------------------------------------------Images------------------------\r\ndef live50():\r\n live50_50.config(image=image50_50used, state=DISABLED)\r\n if quesArea.get(1.0, 'end-1c') == questions[0]:\r\n btnOption1.config(text='')\r\n btnOption2.config(text='')\r\n if quesArea.get(1.0, 'end-1c') == questions[1]:\r\n btnOption1.config(text='')\r\n btnOption3.config(text='')\r\n if quesArea.get(1.0, 'end-1c') == questions[2]:\r\n btnOption2.config(text='')\r\n btnOption4.config(text='')\r\n if quesArea.get(1.0, 'end-1c') == questions[3]:\r\n btnOption3.config(text='')\r\n btnOption4.config(text='')\r\n if quesArea.get(1.0, 'end-1c') == questions[4]:\r\n btnOption2.config(text='')\r\n btnOption4.config(text='')\r\n if quesArea.get(1.0, 'end-1c') == questions[5]:\r\n btnOption4.config(text='')\r\n btnOption2.config(text='')\r\n if quesArea.get(1.0, 'end-1c') == questions[6]:\r\n btnOption1.config(text='')\r\n btnOption2.config(text='')\r\n if quesArea.get(1.0, 'end-1c') == questions[7]:\r\n btnOption3.config(text='')\r\n btnOption4.config(text='')\r\n\r\ndef livepeo():\r\n livePeople.config(image=imagePeopleused, state=DISABLED)\r\n progressbarA.place(x=580, y =190)\r\n progressbarB.place(x=620, y =190)\r\n progressbarC.place(x=660, y =190)\r\n progressbarD.place(x=700, y =190)\r\n\r\n lblprogressbarA.place(x=580,y=320)\r\n lblprogressbarB.place(x=620,y=320)\r\n lblprogressbarC.place(x=660,y=320)\r\n lblprogressbarD.place(x=700,y=320)\r\n\r\n if quesArea.get(1.0, 'end-1c')==questions[0]:\r\n progressbarA.config(value=30)\r\n progressbarB.config(value=50)\r\n progressbarC.config(value=70)\r\n progressbarD.config(value=30)\r\n if quesArea.get(1.0, 'end-1c')==questions[1]:\r\n progressbarA.config(value=10)\r\n progressbarB.config(value=80)\r\n progressbarC.config(value=20)\r\n progressbarD.config(value=15)\r\n if quesArea.get(1.0, 'end-1c')==questions[2]:\r\n progressbarA.config(value=50)\r\n progressbarB.config(value=70)\r\n progressbarC.config(value=95)\r\n progressbarD.config(value=30)\r\n if quesArea.get(1.0, 'end-1c')==questions[3]:\r\n progressbarA.config(value=85)\r\n progressbarB.config(value=25)\r\n 
progressbarC.config(value=10)\r\n progressbarD.config(value=5)\r\n if quesArea.get(1.0, 'end-1c')==questions[4]:\r\n progressbarA.config(value=75)\r\n progressbarB.config(value=35)\r\n progressbarC.config(value=15)\r\n progressbarD.config(value=10)\r\n if quesArea.get(1.0, 'end-1c')==questions[5]:\r\n progressbarA.config(value=70)\r\n progressbarB.config(value=40)\r\n progressbarC.config(value=30)\r\n progressbarD.config(value=10)\r\n if quesArea.get(1.0, 'end-1c')==questions[6]:\r\n progressbarA.config(value=10)\r\n progressbarB.config(value=40)\r\n progressbarC.config(value=15)\r\n progressbarD.config(value=65)\r\n if quesArea.get(1.0, 'end-1c')==questions[7]:\r\n progressbarA.config(value=95)\r\n progressbarB.config(value=25)\r\n progressbarC.config(value=40)\r\n progressbarD.config(value=30)\r\n\r\ndef livelinephone():\r\n mixer.music.load('calling.mp3')\r\n mixer.music.play()\r\n btnCall.place(x=70, y=260)\r\n livePhone.config(image=imagePhoneused, state=DISABLED)\r\n\r\ndef phoneclick():\r\n for i in range(8):\r\n if quesArea.get(1.0, 'end-1c')==questions[i]:\r\n engine.say(f'ฤรกp รกn lร  {correct_answers[i]}')\r\n engine.runAndWait()\r\n# engine.stop()\r\n\r\n#--------------------------------------------------Images------------------------\r\ncenterImage = tk.PhotoImage(file='center.png')\r\nlogoCenter = tk.Button(centerFrame, image= centerImage, bg = 'black', width=300, height=200, bd =0)\r\nlogoCenter.grid(row=0, column=0)\r\n\r\n# 50-50\r\nimage50_50 = tk.PhotoImage(file='5050.png')\r\nimage50_50used = tk.PhotoImage(file='5050used.png')\r\nlive50_50 = tk.Button(topFrame, image= image50_50, bg = 'black', width=180, height=80,bd =0, activebackground='black', command=live50)\r\nlive50_50.grid(row=0,column=0)\r\n\r\n# ask people\r\nimagePeople = tk.PhotoImage(file='people.png')\r\nimagePeopleused = tk.PhotoImage(file='peopleused.png')\r\nlivePeople = tk.Button(topFrame, image= imagePeople, bg = 'black', width=180, height=80,bd =0, activebackground='black', command=livepeo)\r\nlivePeople.grid(row=0,column=1)\r\n\r\n# phone\r\nimagePhone = tk.PhotoImage(file='phone.png')\r\nimagePhoneused = tk.PhotoImage(file='phoneused.png')\r\nlivePhone = tk.Button(topFrame, image= imagePhone, bg = 'black', width=180, height=80,bd =0, activebackground='black', command=livelinephone)\r\nlivePhone.grid(row=0,column=2)\r\n\r\nimgCall = tk.PhotoImage(file='calling.png')\r\nbtnCall = tk.Button(root, image=imgCall, bd=0, bg='black', activebackground='black',cursor='hand2', command=phoneclick)\r\n\r\n# money table\r\nimageList = tk.PhotoImage(file='Picture0.png')\r\nimageList1 = tk.PhotoImage(file='Picture1.png')\r\nimageList2 = tk.PhotoImage(file='Picture2.png')\r\nimageList3 = tk.PhotoImage(file='Picture3.png')\r\nimageList4 = tk.PhotoImage(file='Picture4.png')\r\nimageList5 = tk.PhotoImage(file='Picture5.png')\r\nimageList6 = tk.PhotoImage(file='Picture6.png')\r\nimageList7 = tk.PhotoImage(file='Picture7.png')\r\nimageList8 = tk.PhotoImage(file='Picture8.png')\r\n\r\namountImage = [imageList1,imageList2,imageList3,imageList4,imageList5,imageList6,imageList7,imageList8]\r\n\r\nimgList = tk.Label(rightFrame, image= imageList, bg = 'black',bd =0)\r\nimgList.grid(row=0,column=0)\r\n\r\n# questions\r\nimageLayout = tk.PhotoImage(file = 'lay.png')\r\nimgLayout = tk.Label(bottomFrame, image=imageLayout, bg = 'black')\r\nimgLayout.grid(row=0, column = 0)\r\n\r\n#--------------------------------------------------Million questions--------------------\r\n# function to be called when the selection changes\r\ndef 
select(event):\r\n btnCall.place_forget()\r\n progressbarA.place_forget()\r\n progressbarB.place_forget()\r\n progressbarC.place_forget()\r\n progressbarD.place_forget()\r\n\r\n lblprogressbarA.place_forget()\r\n lblprogressbarB.place_forget()\r\n lblprogressbarC.place_forget()\r\n lblprogressbarD.place_forget()\r\n\r\n b=event.widget\r\n value=b['text']\r\n for i in range(8):\r\n if value==correct_answers[i]:\r\n if value == correct_answers[7]:\r\n def close():\r\n root2.destroy()\r\n root.destroy()\r\n\r\n def playagain():\r\n live50_50.config(state=NORMAL, image = image50_50)\r\n livePeople.config(state=NORMAL, image = imagePeople)\r\n livePhone.config(state=NORMAL, image = imagePhone)\r\n root2.destroy()\r\n quesArea.delete(1.0, tk.END)\r\n quesArea.insert(tk.END, questions[0])\r\n\r\n btnOption1.config(text=first_option[0])\r\n btnOption2.config(text=second_option[0])\r\n btnOption3.config(text=third_option[0])\r\n btnOption4.config(text=fourth_option[0])\r\n imgList.config(image=imageList)\r\n\r\n mixer.music.load('kbcwon.mp3')\r\n mixer.music.play()\r\n root2 = tk.Toplevel()\r\n root2.overrideredirect(True)\r\n root2.config(bg=\"black\")\r\n root2.geometry(\"1270x652+10+30\")\r\n root2.title(\"You won 0 VND\")\r\n lblImage = tk.Label(root2, image= centerImage, bd = 0, bg='black')\r\n lblImage.pack(pady=30)\r\n\r\n lblWin = tk.Label(root2, text=\"You won\", font=('arial',50,'bold'), bg=\"black\", fg='white')\r\n lblWin.pack()\r\n\r\n btnPlayAgain = tk.Button(root2, text=\"Play again\", font=('arial',30,\"bold\"),bg=\"black\",fg=\"white\",\r\n activebackground=\"black\", activeforeground=\"white\", bd =0, cursor=\"hand2\", command=playagain)\r\n btnPlayAgain.pack(side = 'top', fill = 'both', expand = 'yes')\r\n\r\n btnClose = tk.Button(root2, text=\"close\", font=('arial',30,\"bold\"),bg=\"black\",fg=\"white\",\r\n activebackground=\"black\", activeforeground=\"white\", bd =0, cursor=\"hand2\", command=close)\r\n btnClose.pack(side = 'bottom', fill = 'both', expand = 'yes')\r\n \r\n\r\n root2.mainloop()\r\n break\r\n # next question, answers\r\n quesArea.delete(\"1.0\", tk.END)\r\n quesArea.insert(tk.END, questions[i+1])\r\n\r\n btnOption1.config(text=first_option[i+1])\r\n btnOption2.config(text=second_option[i+1])\r\n btnOption3.config(text=third_option[i+1])\r\n btnOption4.config(text=fourth_option[i+1])\r\n imgList.config(image=amountImage[i])\r\n \r\n if value not in correct_answers:\r\n def close():\r\n root1.destroy()\r\n root.destroy()\r\n\r\n def tryagain():\r\n live50_50.config(state=NORMAL, image = image50_50)\r\n livePeople.config(state=NORMAL, image = imagePeople)\r\n livePhone.config(state=NORMAL, image = imagePhone)\r\n\r\n root1.destroy()\r\n quesArea.delete(1.0, tk.END)\r\n quesArea.insert(tk.END, questions[0])\r\n\r\n btnOption1.config(text=first_option[0])\r\n btnOption2.config(text=second_option[0])\r\n btnOption3.config(text=third_option[0])\r\n btnOption4.config(text=fourth_option[0])\r\n imgList.config(image=imageList)\r\n\r\n root1 = tk.Toplevel()\r\n root1.overrideredirect(True)\r\n root1.config(bg=\"black\")\r\n root1.geometry(\"1270x652+10+30\")\r\n root1.title(\"You won 0 VND\")\r\n lblImage = tk.Label(root1, image= centerImage, bd = 0, bg='black')\r\n lblImage.pack(pady=30)\r\n\r\n lblLose = tk.Label(root1, text=\"You lose\", font=('arial',50,'bold'), bg=\"black\", fg='white')\r\n lblLose.pack()\r\n\r\n btnTryAgain = tk.Button(root1, text=\"Try again\", font=('arial',30,\"bold\"),bg=\"black\",fg=\"white\",\r\n activebackground=\"black\", 
activeforeground=\"white\", bd =0, cursor=\"hand2\", command=tryagain)\r\n btnTryAgain.pack(side = 'top', fill = 'both', expand = 'yes')\r\n\r\n btnClose = tk.Button(root1, text=\"close\", font=('arial',30,\"bold\"),bg=\"black\",fg=\"white\",\r\n activebackground=\"black\", activeforeground=\"white\", bd =0, cursor=\"hand2\", command=close)\r\n btnClose.pack(side = 'bottom', fill = 'both', expand = 'yes')\r\n \r\n\r\n root1.mainloop()\r\n break\r\n \r\n\r\ncorrect_answers = [\"Phแปฅ nแปฏ phแบฃi ฤ‘ฦฐแปฃc ฤ‘แป‘i xแปญ nhฦฐ nam giแป›i trแปซ khi รกp dแปฅng cรกc trฦฐแปng hแปฃp ฤ‘แบทc biแป‡t\",\r\n \"Nแป™i trแปฃ\",\r\n \"Xแปญ lรฝ kแปท luแบญt, xแปญ lรฝ hร nh chรญnh hoแบทc bแป‹ truy cแปฉu trรกch nhiแป‡m hรฌnh sแปฑ\",\r\n \"Thแปฉ 2\",\r\n \"Lแปฑa chแปn giแป›i tรญnh thai nhi\",\r\n \"Tแป•ng ฤ‘ร i quแป‘c gia bแบฃo vแป‡ trแบป em\",\r\n \"Khi ฤ‘แบฟn nhร  cรด giรกo, cแบฃ nam vร  nแปฏ cรนng giรบp cรด phแบงn nแป™i trแปฃ\",\r\n \"40/53\"]\r\n\r\nquestions = [\"Bรฌnh ฤ‘แบณng giแป›i nghฤฉa lร  gรฌ?\",\r\n \"แปž nhiแปu quแป‘c gia, hoแบกt ฤ‘แป™ng nร o chiแบฟm nhiแปu thแปi gian cแปงa phแปฅ nแปฏ mร  nam giแป›i khรดng phแบฃi giแบฃi quyแบฟt?\",\r\n \"Luแบญt Bรฌnh ฤ‘แบณng giแป›i quy ฤ‘แป‹nh hรฌnh thแปฉc xแปญ lรฝ vi phแบกm phรกp luแบญt vแป bรฌnh ฤ‘แบณng giแป›i nhฦฐ thแบฟ nร o?\",\r\n \"Nฤƒm 2020, Viแป‡t Nam xแบฟp thแปฉ bao nhiรชu trong khu vแปฑc ASEAN vแป chแป‰ sแป‘ cam kแบฟt giแบฃm bแบฅt bรฌnh ฤ‘แบณng?\",\r\n \"Hร nh vi nร o vi phแบกm phรกp luแบญt vแป bรฌnh ฤ‘แบณng giแป›i trong lฤฉnh vแปฑc y tแบฟ?\",\r\n \"111 lร  hotline cแปงa tแป•ng ฤ‘ร i nร o\",\r\n \"Cรขu nร o sau ฤ‘รขy thแปƒ hiแป‡n bรฌnh ฤ‘แบณng giแป›i?\",\r\n \"Bao nhiรชu dรขn tแป™c thiแปƒu sแป‘ แปŸ nฦฐแป›c ta cรณ tแปท lแป‡ tแบฃo hรดn tแปซ 20% trแปŸ lรชn?\"]\r\n \r\nfirst_option = [\"Tแบฅt cแบฃ phแปฅ nแปฏ ฤ‘แปu ฤ‘ฦฐแปฃc ฤ‘แป‘i xแปญ nhฦฐ nhau\",\r\n \"ฤi hแปc\",\r\n \"Gรณp รฝ, phรช bรฌnh\",\r\n \"Thแปฉ 2\",\r\n \"Lแปฑa chแปn giแป›i tรญnh thai nhi\",\r\n \"Tแป•ng ฤ‘ร i quแป‘c gia bแบฃo vแป‡ trแบป em\",\r\n \"Cรด chแปง nhiแป‡m nรณi 'Chแป‰ con gรกi ฤ‘ฦฐแปฃc ฤ‘แปƒ tรณc dร i', yรชu cแบงu bแบกn nam ฤ‘แปƒ tรณc dร i trong lแป›p phแบฃi cแบฏt tรณc ngแบฏn\",\r\n \"40/53\"]\r\nsecond_option = [\"Tแบฅt cแบฃ nam giแป›i ฤ‘แปu ฤ‘ฦฐแปฃc ฤ‘แป‘i xแปญ nhฦฐ nhau\",\r\n \"Nแป™i trแปฃ\",\r\n \"Cแบฃnh cรกo, khiแปƒn trรกch\",\r\n \"Thแปฉ 7\",\r\n \"Thแปฑc hiแป‡n phong tแปฅc tแบญp quรกn lแบกc hแบญu mang tรญnh phรขn biแป‡t ฤ‘แป‘i xแปญ vแป giแป›i dฦฐแป›i mแปi hรฌnh thแปฉc\",\r\n \"Tแป•ng ฤ‘ร i yรชu cแบงu trแปฃ giรบp vร  tรฌm kiแบฟm cแปฉu nแบกn trรชn phแบกm vi toร n quแป‘c\",\r\n \"Nhแปฏng bแบกn nam cฦฐแปi ฤ‘รนa, dรนng tแปซ ngแปฏ 'ฤ‘แบงy ฤ‘แปง ฤ‘iแป‡n nฦฐแป›c'khi bแบฏt gแบทp bแบกn nแปฏ cรณ thรขn hรฌnh ฤ‘แบงy ฤ‘แบทn\",\r\n \"37/53\"]\r\nthird_option = [\"Phแปฅ nแปฏ phแบฃi ฤ‘ฦฐแปฃc ฤ‘แป‘i xแปญ nhฦฐ nam giแป›i trแปซ khi รกp dแปฅng cรกc trฦฐแปng hแปฃp ฤ‘แบทc biแป‡t\",\r\n \"Kiแบฟm thu nhแบญp\",\r\n \"Xแปญ lรฝ kแปท luแบญt, xแปญ lรฝ hร nh chรญnh hoแบทc bแป‹ truy cแปฉu trรกch nhiแป‡m hรฌnh sแปฑ\",\r\n \"Thแปฉ 4\",\r\n \"Tแปซ chแป‘i tuyแปƒn dแปฅng nhแปฏng ngฦฐแปi phรน hแปฃp vรฌ lรฝ do giแป›i tรญnh hoแบทc do viแป‡c mang thai/sinh con/cรณ con nhแป\",\r\n \"Tแป•ng ฤ‘ร i cแปฉu hรณa\",\r\n \"Hรฌnh แบฃnh minh hแปa sรกch giรกo khoa luรดn lแปฑa chแปn nam giแป›i cho cรกc nghแป bรกc sฤฉ/cรดng an cรฒn nแปฏ lร m nแป™i trแปฃ\",\r\n \"16/53\"]\r\nfourth_option = [\"Phแปฅ nแปฏ nรชn ฤ‘ฦฐแปฃc ฤ‘แป‘i xแปญ nhฦฐ nam giแป›i.\",\r\n \"Lร m vฦฐแปn\",\r\n \"Chฦฐa cรณ hรฌnh thแปฉc xแปญ lรฝ\",\r\n \"Thแปฉ 5\",\r\n \"Cho thรดi viแป‡c ngฦฐแปi lao ฤ‘แป™ng vรฌ lรฝ do mang thai\",\r\n \"Tแป•ng ฤ‘ร i 
cแบฅp cแปฉu\",\r\n \"Khi ฤ‘แบฟn nhร  cรด giรกo, cแบฃ nam vร  nแปฏ cรนng giรบp cรด phแบงn nแป™i trแปฃ\",\r\n \"28/53\"]\r\n\r\n# question area\r\nquesArea = tk.Text(bottomFrame, font=('arial',10,'bold'), width = 50, height=3, wrap='word', bg='black',\r\nfg='white', bd=0)\r\nquesArea.place(x =70, y = 10)\r\n\r\nquesArea.insert(tk.END, questions[0])\r\n\r\n# answer A B C D\r\nlblA = tk.Label(bottomFrame, text='A: ', bg='black', fg ='white', font=('arial',10,'bold'))\r\nlblA.place(x=60,y=110)\r\n\r\nbtnOption1 = tk.Button(bottomFrame, text=first_option[0], font=('arial',8,'bold'), bg = 'black', fg = 'white', bd = 0, activebackground='black',\r\nactiveforeground='white', cursor=\"hand2\", wraplength=150, justify=LEFT)\r\nbtnOption1.place(x=100, y =100)\r\n\r\nlblB = tk.Label(bottomFrame, text='B: ', bg='black', fg ='white', font=('arial',10,'bold'))\r\nlblB.place(x=330,y=110)\r\n\r\nbtnOption2 = tk.Button(bottomFrame, text=second_option[0], font=('arial',8,'bold'), bg = 'black', fg = 'white', bd = 0, activebackground='black',\r\nactiveforeground='white', cursor=\"hand2\", wraplength=150, justify=LEFT)\r\nbtnOption2.place(x=370, y =100)\r\n\r\nlblC = tk.Label(bottomFrame, text='C: ', bg='black', fg ='white', font=('arial',10,'bold'))\r\nlblC.place(x=60,y=190)\r\n\r\nbtnOption3 = tk.Button(bottomFrame, text=third_option[0], font=('arial',8,'bold'), bg = 'black', fg = 'white', bd = 0, activebackground='black',\r\nactiveforeground='white', cursor=\"hand2\", wraplength=150, justify=LEFT)\r\nbtnOption3.place(x=100, y =180)\r\n\r\nlblD = tk.Label(bottomFrame, text='D: ', bg='black', fg ='white', font=('arial',10,'bold'))\r\nlblD.place(x=330,y=190)\r\n\r\nbtnOption4 = tk.Button(bottomFrame, text=fourth_option[0], font=('arial',8,'bold'), bg = 'black', fg = 'white', bd = 0, activebackground='black',\r\nactiveforeground='white', cursor=\"hand2\", wraplength=150, justify=LEFT)\r\nbtnOption4.place(x=370, y =180)\r\n#--------------------------------------------------progressbar--------------------\r\nprogressbarA = Progressbar(root, orient=VERTICAL, length=120)\r\nprogressbarB = Progressbar(root, orient=VERTICAL, length=120)\r\nprogressbarC = Progressbar(root, orient=VERTICAL, length=120)\r\nprogressbarD = Progressbar(root, orient=VERTICAL, length=120)\r\n\r\nlblprogressbarA = tk.Label(root, text ='A', font=('arial', 20,'bold'), bg='black',fg='white')\r\nlblprogressbarB = tk.Label(root, text ='B', font=('arial', 20,'bold'), bg='black',fg='white')\r\nlblprogressbarC = tk.Label(root, text ='C', font=('arial', 20,'bold'), bg='black',fg='white')\r\nlblprogressbarD = tk.Label(root, text ='D', font=('arial', 20,'bold'), bg='black',fg='white')\r\n\r\nbtnOption1.bind(\"\", select)\r\nbtnOption2.bind(\"\", select)\r\nbtnOption3.bind(\"\", select)\r\nbtnOption4.bind(\"\", select)\r\n\r\n#excecute tkinter\r\nroot.mainloop()", "sub_path": "ailatrieuphu.py", "file_name": "ailatrieuphu.py", "file_ext": "py", "file_size_in_byte": 18278, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pyttsx3.init", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.mixer.init", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 14, "usage_type": "name"}, {"api_name": "pygame.mixer.music.load", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 16, "usage_type": "name"}, {"api_name": 
"pygame.mixer.music.play", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 17, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 22, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 28, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 31, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 34, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 37, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 40, "usage_type": "call"}, {"api_name": "tkinter.constants.DISABLED", "line_number": 44, "usage_type": "name"}, {"api_name": "tkinter.constants.DISABLED", "line_number": 71, "usage_type": "name"}, {"api_name": "pygame.mixer.music.load", "line_number": 124, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 124, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 124, "usage_type": "name"}, {"api_name": "pygame.mixer.music.play", "line_number": 125, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 125, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 125, "usage_type": "name"}, {"api_name": "tkinter.constants.DISABLED", "line_number": 127, "usage_type": "name"}, {"api_name": "tkinter.PhotoImage", "line_number": 137, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 138, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 142, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 143, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 144, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 148, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 149, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 150, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 154, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 155, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 156, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 159, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 160, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 163, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 164, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 165, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 166, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 167, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 168, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 169, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 170, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 171, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 175, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 179, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 180, "usage_type": "call"}, {"api_name": "tkinter.constants.NORMAL", "line_number": 207, "usage_type": "name"}, {"api_name": "tkinter.constants.NORMAL", "line_number": 208, "usage_type": "name"}, {"api_name": "tkinter.constants.NORMAL", "line_number": 209, "usage_type": "name"}, 
{"api_name": "tkinter.END", "line_number": 211, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 212, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 220, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 220, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 220, "usage_type": "name"}, {"api_name": "pygame.mixer.music.play", "line_number": 221, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 221, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 221, "usage_type": "name"}, {"api_name": "tkinter.Toplevel", "line_number": 222, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 227, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 230, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 233, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 237, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 245, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 246, "usage_type": "attribute"}, {"api_name": "tkinter.constants.NORMAL", "line_number": 260, "usage_type": "name"}, {"api_name": "tkinter.constants.NORMAL", "line_number": 261, "usage_type": "name"}, {"api_name": "tkinter.constants.NORMAL", "line_number": 262, "usage_type": "name"}, {"api_name": "tkinter.END", "line_number": 265, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 266, "usage_type": "attribute"}, {"api_name": "tkinter.Toplevel", "line_number": 274, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 279, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 282, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 285, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 289, "usage_type": "call"}, {"api_name": "tkinter.Text", "line_number": 350, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 354, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 357, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 360, "usage_type": "call"}, {"api_name": "tkinter.constants.LEFT", "line_number": 361, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 364, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 367, "usage_type": "call"}, {"api_name": "tkinter.constants.LEFT", "line_number": 368, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 371, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 374, "usage_type": "call"}, {"api_name": "tkinter.constants.LEFT", "line_number": 375, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 378, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 381, "usage_type": "call"}, {"api_name": "tkinter.constants.LEFT", "line_number": 382, "usage_type": "name"}, {"api_name": "tkinter.ttk.Progressbar", "line_number": 385, "usage_type": "call"}, {"api_name": "tkinter.constants.VERTICAL", "line_number": 385, "usage_type": "name"}, {"api_name": "tkinter.ttk.Progressbar", "line_number": 386, "usage_type": "call"}, {"api_name": "tkinter.constants.VERTICAL", "line_number": 386, "usage_type": "name"}, {"api_name": "tkinter.ttk.Progressbar", "line_number": 387, "usage_type": "call"}, {"api_name": "tkinter.constants.VERTICAL", "line_number": 387, "usage_type": "name"}, {"api_name": "tkinter.ttk.Progressbar", "line_number": 388, 
"usage_type": "call"}, {"api_name": "tkinter.constants.VERTICAL", "line_number": 388, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 390, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 391, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 392, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 393, "usage_type": "call"}]} {"seq_id": "196681281", "text": "from tapiriik.sync import Sync\nfrom tapiriik.database import db\nimport time\nimport datetime\nimport os\nimport signal\nimport sys\nimport subprocess\n\nRun = True\n\noldCwd = os.getcwd()\nWorkerVersion = subprocess.Popen([\"git\", \"rev-parse\", \"HEAD\"], stdout=subprocess.PIPE, cwd=os.path.dirname(__file__)).communicate()[0].strip()\nos.chdir(oldCwd)\n\ndef sync_interrupt(signal, frame):\n global Run\n Run = False\n\nsignal.signal(signal.SIGINT, sync_interrupt)\n\ndef sync_heartbeat():\n\tdb.sync_workers.update({\"Process\": os.getpid()}, {\"$set\": {\"Heartbeat\": datetime.datetime.utcnow()}})\n\nprint(\"Sync worker starting at \" + datetime.datetime.now().ctime() + \" pid \" + str(os.getpid()))\ndb.sync_workers.update({\"Process\": os.getpid()}, {\"Process\": os.getpid(), \"Heartbeat\": datetime.datetime.utcnow(), \"Version\": WorkerVersion}, upsert=True)\nsys.stdout.flush()\n\nwhile Run:\n Sync.PerformGlobalSync(heartbeat_callback=sync_heartbeat)\n\n time.sleep(5)\n sync_heartbeat()\n\nprint(\"Sync worker shutting down cleanly\")\ndb.sync_workers.remove({\"Process\": os.getpid()})\nsys.stdout.flush()\n", "sub_path": "sync_worker.py", "file_name": "sync_worker.py", "file_ext": "py", "file_size_in_byte": 1076, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "os.getcwd", "line_number": 12, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 13, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 14, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 20, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tapiriik.database.db.sync_workers.update", "line_number": 23, "usage_type": "call"}, {"api_name": "tapiriik.database.db.sync_workers", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tapiriik.database.db", "line_number": 23, "usage_type": "name"}, {"api_name": "os.getpid", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.getpid", "line_number": 25, "usage_type": "call"}, {"api_name": "tapiriik.database.db.sync_workers.update", "line_number": 26, "usage_type": "call"}, {"api_name": "tapiriik.database.db.sync_workers", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tapiriik.database.db", "line_number": 26, "usage_type": "name"}, {"api_name": "os.getpid", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 26, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tapiriik.sync.Sync.PerformGlobalSync", "line_number": 30, "usage_type": "call"}, {"api_name": "tapiriik.sync.Sync", "line_number": 30, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "tapiriik.database.db.sync_workers.remove", "line_number": 36, "usage_type": "call"}, {"api_name": "tapiriik.database.db.sync_workers", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tapiriik.database.db", "line_number": 36, "usage_type": "name"}, {"api_name": "os.getpid", "line_number": 36, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 37, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 37, "usage_type": "attribute"}]} {"seq_id": "204304919", "text": "import librosa\nimport numpy as np\nimport librosa.display\nimport matplotlib.pyplot as plt\n\n\nclass DataAugmentation:\n\n def __init__(self, noise_factor=0.02, sampling_rate=8000,\n shift_max=1, shift_direction='both',\n pitch_factor=2, speed_factor=1.5, visualization=False):\n \"\"\" Initialization\n\n :param noise_factor: amount of noise to be injected (0.00 - 0.10).\n :param sampling_rate: frequency of the sample in Hz (Generally 8000Hz).\n :param shift_max: how much to shift the audio sample by (number of seconds).\n :param shift_direction: In which direction to shift audio (left, right, both).\n :param pitch_factor: How much to change the pitch of audio (-8 to 8).\n :param speed_factor: Changes speed of the audio (0 to 2)\n :param visualization: display original and augmented data, by default False.\n \"\"\"\n\n self.noise_factor = noise_factor\n self.sampling_rate = sampling_rate\n self.shift_max = shift_max\n self.shift_direction = shift_direction\n self.pitch_factor = pitch_factor\n self.speed_factor = speed_factor\n self.visualization = visualization\n self.data = 0\n self.sr = 0\n\n def noise_injection(self):\n \"\"\"\n Injects random values into the audio array which counts as background\n :return: Augmented data\n \"\"\"\n\n noise = np.random.randn(len(self.data))\n augmented_data = self.data + self.noise_factor * noise\n\n # Cast back to same data type\n augmented_data = augmented_data.astype(type(self.data[0]))\n\n if self.visualization:\n self.visualize(augmented_data)\n\n return augmented_data\n\n def shifting_time(self):\n\n \"\"\"\n Shifting Right is shifting backwards and shifting left is\n shifting forward. 
Here shift_max is in seconds.\n\n :return: augmented_data\n \"\"\"\n\n shift = np.random.randint(self.sampling_rate * self.shift_max)\n\n if self.shift_direction == 'right':\n shift = -shift\n\n elif self.shift_direction == 'both': # If want to do random shifting.\n direction = np.random.randint(0, 2)\n if direction == 1:\n shift = -shift\n\n augmented_data = np.roll(self.data, shift)\n\n # Silence heading/ tailing\n if shift > 0:\n augmented_data[:shift] = 0\n else:\n augmented_data[shift:] = 0\n\n if self.visualization:\n self.visualize(augmented_data)\n\n return augmented_data\n\n def change_pitch(self):\n\n \"\"\"\n pitch_factor will change the pitch up if positive and down if negative.\n :return augmented_data: returns an ndArray.\n \"\"\"\n augmented_data = librosa.effects.pitch_shift(self.data, self.sampling_rate, self.pitch_factor)\n\n if self.visualization:\n self.visualize(augmented_data)\n\n return augmented_data\n\n def change_speed(self):\n \"\"\"\n Speed_factor should be a float from 0 to 2\n :return: Augmented data\n \"\"\"\n augmented_data = librosa.effects.time_stretch(self.data, self.speed_factor)\n\n if self.visualization:\n self.visualize(augmented_data)\n\n return augmented_data\n\n def load_data(self, file_path):\n \"\"\"\n Loads the data into file\n :return: Augmented data\n \"\"\"\n self.data, self.sr = librosa.load(file_path, sr=None)\n \n def load_numpy(self, data, sr):\n \"\"\"\n Loads the data into file\n :return: Augmented data\n \"\"\"\n self.data, self.sr = data, sr\n\n def visualize(self, augmented_data):\n \"\"\"\n For displaying augmentation. 1st Plot raw data, 2nd Plot augmented data.\n :param augmented_data:\n :return: Displays Plots if visualize is True\n \"\"\"\n plt.figure(figsize=(14, 5))\n librosa.display.waveplot(self.data, sr=self.sr)\n plt.show()\n\n plt.figure(figsize=(14, 5))\n librosa.display.waveplot(augmented_data, sr=self.sr)\n plt.show()\n \n \n", "sub_path": "DataAugmentation.py", "file_name": "DataAugmentation.py", "file_ext": "py", "file_size_in_byte": 4130, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "numpy.random.randn", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.roll", "line_number": 69, "usage_type": "call"}, {"api_name": "librosa.effects.pitch_shift", "line_number": 88, "usage_type": "call"}, {"api_name": "librosa.effects", "line_number": 88, "usage_type": "attribute"}, {"api_name": "librosa.effects.time_stretch", "line_number": 100, "usage_type": "call"}, {"api_name": "librosa.effects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "librosa.load", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "librosa.display.waveplot", "line_number": 128, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 128, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "librosa.display.waveplot", "line_number": 132, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 132, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}]} {"seq_id": "457183749", "text": "import requests\nimport os\nfrom lxml import etree\nfrom settings import url, headers\nfrom db import DBManage\nfrom model import Shop\nfrom settings import c_classify\n# ็ฝ‘็ปœ่ฎฟ้—ฎ\ndef request_content(url, headers):\n response = requests.get(url, headers=headers).content.decode('gbk')\n t = etree.HTML(response)\n # print(response)\n # with open('jd.html', 'w', encoding='utf-8') as f:\n # f.write(response)\n return t\ndef request_content_x(url, headers):\n try:\n response = requests.get(url, headers=headers).content.decode('gbk')\n t = etree.HTML(response)\n\n return t\n except:\n pass\ndef photo(url):\n root_path = r'E:\\PythonFramework\\DjangoQShop\\ZhanLi\\Buyer\\static\\buyer\\images'\n #ๅˆฉ็”จsplit()ๅ‡ฝๆ•ฐ่Žทๅ–urlๆœ€ๅŽ็š„ๆ–‡ไปถๅ\n img_name = url.split('/')[-1]\n img_path = root_path + r'\\{0}'.format(img_name)\n try:\n if not os.path.exists(img_path):\n r = requests.get(url)\n with open(img_path, 'wb') as f:\n f.write(r.content)\n print(\"ๆ–‡ไปถไฟๅญ˜ๆˆๅŠŸ\")\n return img_name\n else:\n print(\"ๆ–‡ไปถๅทฒๅญ˜ๅœจ\")\n return 'images'\n except:\n print(\"ๆ‰ง่กŒๅ‡บ้”™\")\n\n#ๅˆ†็ฑป้€‰ๆ‹ฉ\n\n\n# ๆ•ฐๆฎ่งฃๆž\ndef parse_content(tree):\n # print(type(tree))\n shop_info_list = tree.xpath('//div[@class=\"f-sort\"]/a/@href')\n print(shop_info_list)\n # db = DBManage()\n # for shop_info in shop_info_list:\n # c_price = shop_info.xpath('./div[@class=\"gl-i-wrap\"]/div[@class=\"p-price\"]/strong/i/text()')[0]\n # url = shop_info.xpath('./div[@class=\"gl-i-wrap\"]/div[@class=\"p-name p-name-type-2\"]/a/@href')[0]\n # print(url)\n # print(c_price)\n # p_url = shop_info.xpath(\"./div[@class='gl-i-wrap']/div[@class='p-img']/a/img/@source-data-lazy-img\")[0]\n # photo_url = 'https:' + p_url\n # c_picture = photo(photo_url)\n # if not 'https:' in url:\n # full_url = 'https:'+url\n # else:\n # full_url = url\n # tree = request_content_x(full_url, headers)\n # try:\n # c_title = tree.xpath('//div[@class=\"sku-name\"]//text()')[-1].strip()\n # title = c_title.split(' ')\n # print(c_title[:14])\n # title = title[-1] if len(title[-1]) > 10 else c_title[:15]\n # print(title[:15])\n # print('*'*50)\n # xxxx = tree.xpath('//ul[@class=\"parameter2 p-parameter-list\"]/li/text()')\n # print(xxxx)\n # for xx in xxxx:\n # if 'ๅ•†ๅ“ๅ็งฐ' in xx:\n # lists = xx.split('๏ผš')\n # c_title = lists[1]\n # else:\n # c_weight = ' '\n # if 'ๅ•†ๅ“ๆฏ›้‡' in xx:\n # lists = xx.split('๏ผš')\n # c_weight = lists[1]\n # else:\n # c_weight = ' '\n # if 'ๅ•†ๅ“ไบงๅœฐ' or 'ๅ›ฝไบง/่ฟ›ๅฃ' in xx:\n # lists = xx.split('๏ผš')\n # c_CO = lists[1]\n # else:\n # c_CO = ' '\n # if 'ๅฃๅ‘ณ' in xx:\n # lists = xx.split('๏ผš')\n # c_taste = lists[1]\n # else:\n # c_taste = ' '\n #\n # print(c_title, c_price, c_weight, c_CO, c_taste, c_classify, \"images\\\\\"+c_picture)\n # item = Shop(c_title, c_price, c_weight, c_CO, c_taste, c_classify, \"images\\\\\"+c_picture)\n # db.save_item(item)\n # except:\n # pass\n\nif __name__ == '__main__':\n tree = request_content(url, headers)\n parse_content(tree)\n", 
"sub_path": "Spiders/Goods.py", "file_name": "Goods.py", "file_ext": "py", "file_size_in_byte": 3680, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "settings.url", "line_number": 10, "usage_type": "argument"}, {"api_name": "settings.headers", "line_number": 10, "usage_type": "name"}, {"api_name": "lxml.etree.HTML", "line_number": 11, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 11, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "settings.url", "line_number": 18, "usage_type": "argument"}, {"api_name": "settings.headers", "line_number": 18, "usage_type": "name"}, {"api_name": "lxml.etree.HTML", "line_number": 19, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 19, "usage_type": "name"}, {"api_name": "settings.url.split", "line_number": 27, "usage_type": "call"}, {"api_name": "settings.url", "line_number": 27, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "settings.url", "line_number": 31, "usage_type": "argument"}, {"api_name": "settings.url", "line_number": 102, "usage_type": "argument"}, {"api_name": "settings.headers", "line_number": 102, "usage_type": "argument"}]} {"seq_id": "262671948", "text": "## --*--coding=utf-8 --*--\nimport requests\nimport time\nimport math\nimport re\nfrom pyquery import PyQuery as PQ\nfrom bs4 import BeautifulSoup as BS\n\nheader={\n\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36\"\n}\n\ndef urlstr(url,yeshu):\n '''ๅŠจๆ€ๆ•ด็†url'''\n urlone=\"https://bj.lianjia.com/zufang/%s/pg%d/\"%(url,yeshu)\n # print(urlone)\n return urlone\n\ndef request_html(url,header):\n '''ไผ ๅ…ฅ็ฝ‘ๅ€๏ผŒ่Žทๅพ—่ฏทๆฑ‚่ฟ”ๅ›ž็š„ๅ†…ๅฎน'''\n return_html=\"\"\n re_str=requests.get(url,headers=header)\n if re_str.status_code==200:\n return_html=re_str.text\n else:\n print(\"็ฝ‘้กต่ฎฟ้—ฎไธๆˆๅŠŸ๏ผŒ%s\"%url)\n\n return return_html\n\ndef Xiangxixinxi(Concent_insert,url_str):\n '''\n urlไปฃ่กจ่ฏฆ็ป†ไธ€ๆกไฟกๆฏ็š„ๅ†…ๅฎน็ฝ‘้กต๏ผŒconcentๆ˜ฏlistๅŒ…ๆ‹ฌๆ ‡้ข˜ๅ’Œๅ…ณ้”ฎ่ฏ\n :param Concent_insert:\n :param url_str:\n :return:ๆ—  ๅบ”่ฏฅ็›ดๆŽฅ่ฐƒ็”จ ๆ•ฐๆฎๅบ“ๆ’ๅ…ฅ๏ผˆๅ…ˆๅ†™ๅ…ฅๅ†…ๅญ˜๏ผŒๅ†ๅ†™ๆ•ฐๆฎๅบ“๏ผ‰\n '''\n X_mianji=0\n X_huxing=\"\"\n X_louceng=\"\"\n X_chaoyang=\"\"\n X_xiaoqu=\"\"\n X_weizhi=\"\"\n X_ditie=\"\"\n X_shijian=0\n X_jiage=0\n html_str=request_html(url_str,header)\n items_yuan=BS(html_str,\"lxml\")\n X_jiage=int(items_yuan.find(class_=\"price\").find(attrs={\"class\":\"total\"}).string)##ไปทๆ ผ่Žทๅ–\n\n items=items_yuan.select(\".zf-room p\")\n if items:\n try: ##่Žทๅ–้ข็งฏ\n mianji=items[0].get_text()\n mianjire=re.search(\"(.*?)(\\d+\\.\\d+).*?\",mianji,re.M|re.I)\n X_mianji=float(mianjire.group(2))\n # print(X_mianji)\n except Exception as ee:\n print(\"mianji:\"+ee.args[0])\n\n try:##ๆˆทๅž‹\n huxiang=items[1].get_text()\n hxre=re.search(\"(.*?)๏ผš(.*)\",huxiang)\n X_huxing=hxre.group(2)\n # print(X_huxing)\n except Exception as ee:\n print(\"huxing\"+ee.args[0])\n\n try:##ๆฅผๅฑ‚้ซ˜ๅบฆ\n louceng=items[2].get_text()\n lcre=re.search(\"(.*?)๏ผš(.*)\",louceng)\n X_louceng=lcre.group(2)\n # print(X_louceng)\n except Exception:\n print(\"louceng\")\n\n 
try:##ๆ–นๅ‘\n chaoyang=items[3].get_text()\n cyre=re.search(\"(.*?)๏ผš(.*)\",chaoyang)\n X_chaoyang=cyre.group(2)\n except Exception:\n print(\"chaoxiang\")\n\n try:##ๅœฐ้“\n ditie=items[4].get_text()\n dtre=re.search(\"(.*?)๏ผš(.*)\",ditie)\n X_ditie=dtre.group(2)\n except Exception:\n print(\"ditie\")\n\n try:##ๅฐๅŒบ\n xiaoqu=items[5].get_text()\n xqre=re.search(\"(.*?)๏ผš(.*)\",xiaoqu)\n X_xiaoqu=xqre.group(2)\n except Exception:\n print(\"xiaoqu\")\n\n try:##ๅœฐ็†ไฝ็ฝฎ\n weizhi=items[6].get_text()\n wzre=re.search(\"(.*?)๏ผš(.*)\",weizhi)\n X_weizhi=wzre.group(2)\n except Exception:\n print(\"weizhi\")\n\n try:##ๅ‘ๅธƒๆ—ถ้—ด\n shijian=items[7].get_text()\n sjre=re.search(\"(.*?)๏ผš(.*)\",shijian)\n X_shijian=sjre.group(2)\n except Exception:\n print(\"shijian\")\n\n Concent_insert+=[X_jiage,X_mianji,X_huxing,X_louceng,X_chaoyang,X_xiaoqu,X_weizhi,X_ditie,X_shijian]\n print(Concent_insert)\n\ndef PqFx(htmlstr):\n '''\n ไธป่ฆๅˆ†ๆžๆ‰“ๅผ€็š„้กตๆ•ฐ้กต้ข๏ผŒ่Žทๅพ—่ฏฅ้กต็š„ไฟกๆฏๅ†…ๅฎนๅ’ŒURL่ฟžๆŽฅ๏ผŒ็„ถๅŽไผ ๅ…ฅ่ฏฆ็ป†ๅˆ†ๆžๆ–นๆณ•ไธญ\n :param htmlstr:\n :return: ไฟกๆฏ็š„ๆกๆ•ฐ๏ผŒไธป่ฆๆ˜ฏ็ป™็ฌฌไธ€ๆฌก็”จๆฅ่ฎก็ฎ—uanๅพช็Žฏๆฌกๆ•ฐ\n '''\n shuju=30 #็ป™ๅˆๅง‹ๅ€ผ๏ผŒๅฆ‚ๆžœ่Žทๅ–ๅคฑ่ดฅ๏ผŒ่ฏฅๆŸฅ่ฏขๅชไผšๅพช็Žฏไธ€ye\n py_str=PQ(htmlstr)\n # shuju=int(py_str(\".list-head.clear>h2 span\").text())\n ####-----------------------------------------------##\n try:\n items=py_str(\"#house-lst li\").items()\n for item in items:\n Concet_insert=[]\n toumu=item(\".info-panel>h2 a\") ## ่ฟžๆŽฅ็š„็ฝ‘ๅ€ๅ’Œ่กจ่ฟฐ\n item_url=toumu.attr[\"href\"]\n item_title=toumu.attr(\"title\")\n\n guanjianci=item(\".view-label.left>span\").text()\n item_gjc=re.sub(\" \",\"+\",guanjianci)\n\n Concet_insert=[item_title,item_gjc]\n # print(item_url)\n Xiangxixinxi(Concet_insert,item_url)\n\n except Exception as rr:\n print(\"็ฝ‘้กตๅ†…ๅฎน่Žทๅ–ๅคฑ่ดฅ--PqFx:\"+rr.args[0])\n\n return shuju\n\ndef Xunhuanrespon(shuju,input_str):\n ''' ไปŽ่ฎฟ้—ฎ็š„้ฆ–้กต่Žทๅพ—๏ผŒๆŸฅ่ฏข็ป“ๆžœไธ€ๅ…ฑๆœ‰ๅคšๅฐ‘ๆกๅ†…ๅฎน๏ผŒ\n ็„ถๅŽ็ฎ—ๅ‡บ้œ€่ฆๅพช็Žฏde้กตๆ•ฐ๏ผŒไปŽ่€Œๅพช็Žฏๅˆ†ๆžๆฏไธ€้กต'''\n ye_shu=int(math.ceil(shuju/30.0))\n for i in range(2,ye_shu+1):\n url_str_x=urlstr(input_str,i)\n htmlstr_x=request_html(url_str_x,header)\n if htmlstr_x!=\"\":\n PqFx(htmlstr_x)\n else:\n pass\n\nif __name__==\"__main__\":\n input_str=input(\"่พ“ๅ…ฅๆŸฅ่ฏข็š„ๅŒบๅŸŸ๏ผš\")\n url_str=urlstr(input_str,1)\n htmlstr=request_html(url_str,header)\n if htmlstr!=\"\":\n shuju=PqFx(htmlstr)\n Xunhuanrespon(shuju,input_str)\n else:\n pass", "sub_path": "้“พๅฎถ็งŸๆˆฟๆ•ฐๆฎ/็ฝ‘้กตๅˆ†ๆžๆ–นๅผ.py", "file_name": "็ฝ‘้กตๅˆ†ๆžๆ–นๅผ.py", "file_ext": "py", "file_size_in_byte": 5080, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 47, "usage_type": "call"}, {"api_name": "re.search", "line_number": 54, "usage_type": "call"}, {"api_name": "re.M", "line_number": 54, "usage_type": "attribute"}, {"api_name": "re.I", "line_number": 54, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 62, "usage_type": "call"}, {"api_name": "re.search", "line_number": 70, "usage_type": "call"}, {"api_name": "re.search", "line_number": 78, "usage_type": "call"}, {"api_name": "re.search", "line_number": 85, "usage_type": "call"}, {"api_name": "re.search", "line_number": 92, "usage_type": "call"}, {"api_name": "re.search", "line_number": 99, "usage_type": "call"}, {"api_name": "re.search", "line_number": 106, 
"usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 121, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 133, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 147, "usage_type": "call"}]} {"seq_id": "321389540", "text": "# Author: ZHANG\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\n\r\n# ๅˆ›ๅปบๆต่งˆๅ™จๅฏน่ฑก\r\n# ๆŒ‡ๅฎšๆต่งˆๅ™จ็ฑปๅž‹๏ผš Chorme Firefox\r\nbrowser = webdriver.Chrome()\r\ntry:\r\n browser.get('https://www.taobao.com/')\r\n inputs = browser.find_element_by_id('q')\r\n inputs.send_keys('MEI ZU')\r\n import time\r\n time.sleep(2)\r\n inputs.clear()\r\n inputs.send_keys('MEIZU 16th')\r\n\r\n # ๅฎšไน‰้ผ ๆ ‡็‚นๅ‡ป\r\n button = browser.find_element_by_class_name('btn-search')\r\n button.click()\r\n\r\nexcept Exception as e:\r\n print(e)\r\nfinally:\r\n browser.close()\r\n\r\n\r\n\r\n", "sub_path": "007 ๅ…ƒ็ด ไบคไบ’ๆ“ไฝœ.py", "file_name": "007 ๅ…ƒ็ด ไบคไบ’ๆ“ไฝœ.py", "file_ext": "py", "file_size_in_byte": 776, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 10, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 10, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 16, "usage_type": "call"}]} {"seq_id": "174733511", "text": "import argparse\nimport base64\nimport os\nimport yaml\n\ndef base64_decode_and_write(filename):\n with open(filename, 'r') as file:\n # Load YAML file\n file_content = yaml.safe_load(file)\n\n # Extract base64 string\n base64_str = file_content.get('options', {}).get('value', '').replace('\\n', '')\n\n # Base64 decode\n decoded_str = base64.b64decode(base64_str)\n\n # Output file name from YAML file\n output_filename = file_content.get('output', {}).get('filename', '')\n\n # Output directory same as input directory\n output_dir = os.path.dirname(os.path.abspath(filename))\n\n # Write the decoded content to the file\n with open(os.path.join(output_dir, output_filename), 'wb') as file:\n file.write(decoded_str)\n\nif __name__ == \"__main__\":\n # Parse command-line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('filenames', metavar='N', type=str, nargs='+',\n help='input files to process')\n args = parser.parse_args()\n\n # Process each file\n for filename in args.filenames:\n base64_decode_and_write(filename)\n\n", "sub_path": "extract_payload_from_yaml.py", "file_name": "extract_payload_from_yaml.py", "file_ext": "py", "file_size_in_byte": 1112, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "yaml.safe_load", "line_number": 9, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 29, "usage_type": "call"}]} {"seq_id": "143203929", "text": "from typing import List\n\n\nclass Solution:\n # 
https://www.bilibili.com/video/BV1JJ411V7PD?from=search&seid=16375649315385481561\n # DP\n def maxProduct(self, nums: List[int]) -> int:\n maxProductMemo = [None] * len(nums)\n minProductMemo = [None] * len(nums)\n\n maxProductMemo[0] = nums[0]\n minProductMemo[0] = nums[0]\n max_product = nums[0]\n\n for i in range(1, len(nums)):\n maxProductMemo[i] = max(nums[i], maxProductMemo[i - 1] * nums[i], minProductMemo[i - 1] * nums[i])\n minProductMemo[i] = min(nums[i], maxProductMemo[i - 1] * nums[i], minProductMemo[i - 1] * nums[i])\n max_product = max(max_product, maxProductMemo[i])\n return max_product\n\n\nif __name__ == '__main__':\n solution = Solution()\n nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]\n result = solution.maxProduct(nums)\n print('Max Product Sub Array:{}'.format(result))\n", "sub_path": "DynamicProgramming/152-MaximumProductSubarray.py", "file_name": "152-MaximumProductSubarray.py", "file_ext": "py", "file_size_in_byte": 920, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "typing.List", "line_number": 7, "usage_type": "name"}]} {"seq_id": "309072569", "text": "import requests\nimport unittest\nimport json\nimport re\n\n\nclass TestClass(unittest.TestCase):\n # def test002Post(self):\n #\n # #bodyๆ•ฐๆฎ\n # keyword={\n # 'csrfmiddlewaretoken':'qOiT4xNs0VSXOkYgZAISr2wAeLf65Ffc4NyBbCtZCnpAq6139fiBLiPofv0BFSfX',\n # 'username':'bobby',\n # 'password':'admin123',\n # 'this_is_the_login_form':'1',\n # 'next':'/'\n # }\n #\n # #header้ƒจๅˆ†็š„้…็ฝฎ\n # headers = {\n # 'User-Agent':'Mozilla/5.0 (Windows NT 6.2; WOW64; Trident/7.0; rv:11.0) like Gecko',\n # 'Accept':'image/gif, image/jpeg, image/pjpeg, application/x-ms-application, application/xaml+xml, application/x-ms-xbap, */*',\n # 'Accept-Language':'zh-CN'\n # }\n #\n # #cookie้ƒจๅˆ†็š„้…็ฝฎ\n # cookies = dict(\n # beacon_id='MTAxLjI1MS4xOTuuMTE5LTE0QzZELTUzQkE4OTQ5QjUyNzctNjE',\n # search_test='1',\n # search_r='32'\n # )\n #\n # #get่ฏทๆฑ‚็š„ๆž„้€ \n # res = requests.post(\n # \"https://customer-api.helijia.com/app-customer/transformers/1030/widgets\",\n # data=keyword, # postๆ•ฐๆฎ\n # headers=headers,\n # cookies=cookies\n # )\n #\n # print(res.text)\n # print(res.status_code)\n # self.assertTrue(u'ไปŠๆ—ฅไธŠๆ–ฐ' in res.text)\n\n\n def test001Get(self):\n #header้ƒจๅˆ†็š„้…็ฝฎ\n #header้ƒจๅˆ†็š„้…็ฝฎ\n headers = {\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.2; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Accept':'image/gif, image/jpeg, image/pjpeg, application/x-ms-application, application/xaml+xml, application/x-ms-xbap, */*',\n 'Accept-Language':'zh-CN'\n }\n\n #cookie้ƒจๅˆ†็š„้…็ฝฎ\n cookies = dict(\n beacon_id='MTAxLjI1MS4xOTuuMTE5LTE0QzZELTUzQkE4OTQ5QjUyNzctNjE',\n search_test='1',\n search_r='32'\n )\n\n params = []\n\n #get่ฏทๆฑ‚็š„ๆž„้€ \n res = requests.get(\n \"http://127.0.0.1:8000/\",\n headers=headers,\n cookies=cookies,\n params= params\n\n )\n reposon_headers = res.headers\n print(res.text)\n print(\"\\n\")\n print(res.status_code)\n print(\"\\n\")\n print(\"reposon_headers:%s\"%reposon_headers)\n print(type(reposon_headers))\n print(\"\\n\")\n print(res.cookies)\n print(\"\\n\")\n print(res.json)\n print(\"\\n\")\n print(res.content)\n print(\"\\n\")\n # self.assertTrue(u'http://img.cudn.static.helijia.com' in res.text)\n print(str(reposon_headers))\n match = \"csrftoken=(.+?);\"\n re_csr = re.findall(match,str(reposon_headers))\n print(re_csr)\n print(\"\\n\")\n print(type(re_csr))\n\nif __name__ == \"__main__\":\n 
print(\"1******************1\")\n unittest.main()", "sub_path": "WWAPITest/test/login.py", "file_name": "login.py", "file_ext": "py", "file_size_in_byte": 2923, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "unittest.TestCase", "line_number": 7, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 65, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 89, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 96, "usage_type": "call"}]} {"seq_id": "7544807", "text": "from odoo import models, fields, api\nfrom odoo.exceptions import UserError, AccessError\nimport logging\n\nclass archiveMultiplePurchaseOrder(models.TransientModel):\n _name = 'purchase.archive.multiple.wizard'\n _description = \"Archive multiple purchase order wizard\"\n\n purchase_order_ids = fields.Many2many(\n 'purchase.order',\n string=\"\"\n )\n # name = fields.Char(required=True, index=True, copy=False)\n active = fields.Boolean(\n 'Active', default=True,\n help=\"If unchecked, it will allow you to hide the purchase order without removing it.\")\n state = fields.Selection([\n ('draft', 'RFQ'),\n ('sent', 'RFQ Sent'),\n ('to approve', 'To Approve'),\n ('purchase', 'Purchase Order'),\n ('done', 'Locked'),\n ('cancel', 'Cancelled')\n ], string='Status', readonly=True, index=True, copy=False, default='draft', track_visibility='onchange')\n\n @api.model\n def default_get(self, field_names):\n defaults = super().default_get(field_names)\n purchase_order_ids = self.env.context['active_ids']\n logging.warning(purchase_order_ids)\n\n # Raise error when purchase orders are not done and cancel\n self._check_purchase_order_state(purchase_order_ids)\n\n defaults['purchase_order_ids'] = purchase_order_ids\n return defaults\n\n # Note : button_archive_multiple run two times\n @api.multi\n def button_archive_multiple(self):\n self.ensure_one()\n for order in self.with_context(active_test=False).purchase_order_ids:\n order.active = not order.active\n\n def _check_purchase_order_state(self, purchase_order_ids):\n orders = self.env['purchase.order'].browse(purchase_order_ids)\n for order in orders:\n if order.state not in ['done', 'cancel']:\n raise UserError(\"Only 'Cancel' or 'Lock' Purchase Order is allowed \")", "sub_path": "wizard/purchase_archive_multiple.py", "file_name": "purchase_archive_multiple.py", "file_ext": "py", "file_size_in_byte": 1918, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "odoo.models.TransientModel", "line_number": 5, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 5, "usage_type": "name"}, {"api_name": "odoo.fields.Many2many", "line_number": 9, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 9, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 14, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 14, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 17, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 17, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 30, "usage_type": "call"}, {"api_name": "odoo.api.model", "line_number": 26, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 26, "usage_type": "name"}, {"api_name": "odoo.api.multi", "line_number": 39, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 39, "usage_type": "name"}, 
{"api_name": "odoo.exceptions.UserError", "line_number": 49, "usage_type": "call"}]} {"seq_id": "290834399", "text": "import re\nfrom tests import DATA\nfrom collections import Counter\n\n\ndef print_value(func):\n def wrapper(*args):\n func(*args)\n last_ten = ''.join(str(args[0][len(args[0]) - 10:]))\n first_ten = ''.join(str(args[0][:10]))\n print('ะŸะตั€ะฒั‹ะต 10 ะทะฝะฐั‡ะตะฝะธะน:\\n' + first_ten + '\\nะŸะพัะปะตะดะฝะธะต 10 ะทะฝะฐั‡ะตะฝะธะน:\\n' + last_ten)\n return args\n\n return wrapper\n\n\n@print_value\ndef loading(collect, sort_file):\n with open(sort_file, \"r\") as file:\n for line in file:\n simple_key = re.findall(r'(\\b\\w+\\b)', line)[0]\n simple_value = re.findall(r'\\b\\d+\\b', line)[0]\n obj = DATA.Data(simple_key, simple_value, '')\n collect.append(obj)\n return collect\n\n\n@print_value\ndef adding_an_additional_parameter(collect):\n num_list = []\n for i in range(len(collect)):\n num = collect[i].simple_value\n num_list.append(num)\n dict_frequency = Counter(num_list)\n for y in range(len(collect)):\n date_y = collect[y]\n val = date_y.simple_value\n simple_count = dict_frequency.get(val)\n date_y.simple_count = simple_count\n return collect\n\n\n@print_value\ndef sorting(collect, sorting_order):\n if sorting_order.lower() == 'asc':\n sort = True\n else:\n sort = False\n collect.sort(key=lambda data: int(data.simple_count), reverse=sort)\n return collect\n\n\n@print_value\ndef saving_to_a_file(collect, output_file):\n with open(output_file, \"w\") as file:\n for y in range(len(collect)):\n line = str(collect[y]) + '\\n'\n file.write(line)\n\n\ndef load_sort_add_counter_and_save_in_file(sort_file, sorting_order, output_file):\n collect = []\n loading(collect, sort_file)\n adding_an_additional_parameter(collect)\n sorting(collect, sorting_order)\n saving_to_a_file(collect, output_file)\n\n\nload_sort_add_counter_and_save_in_file(\"simple.dat\", \"Asc\", \"simple3.dat\")\n", "sub_path": "tests/Function.py", "file_name": "Function.py", "file_ext": "py", "file_size_in_byte": 1949, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "re.findall", "line_number": 21, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 22, "usage_type": "call"}, {"api_name": "tests.DATA.Data", "line_number": 23, "usage_type": "call"}, {"api_name": "tests.DATA", "line_number": 23, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 34, "usage_type": "call"}]} {"seq_id": "152036438", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n'''\nAuthor : Helvio Junior (M4v3r1cK)\nDate : 2019-01-15\nUpdated: 2021-09-17\nhttps://github.com/helviojunior/libs/blob/master/python/xor.py\n'''\n\nimport os, re, sys, getopt, argparse\nimport sys, struct\n\nparser = argparse.ArgumentParser()\nparser.add_argument('text', help='Text to encode with xor')\nparser.add_argument('key', help='xor key')\nparser.add_argument('-f', '--format', default='raw', help='Format to output (c, csharp, python)')\nparser.add_argument('-v', '--variable', default='buffer', help='Buffer variable Name')\n\nargs = parser.parse_args()\n\n\ndef print_err(text):\n sys.stderr.write(text)\n sys.stderr.flush()\n\ndef print_std(data):\n sys.stdout.buffer.write(data)\n sys.stdout.flush()\n\ndef print_output(data):\n\n fmt = args.format\n if fmt != \"raw\":\n var_name = args.variable\n\n if fmt == \"c\":\n txtdata = \"unsigned char %s[] =\" % var_name\n txtdata += \"\\n\\\"\"\n for idx, val in enumerate(data):\n\n if idx != 0 and idx % 16 
== 0:\n txtdata += \"\\\"\\n\\\"\"\n\n txtdata += \"\\\\x{0:02x}\".format(val)\n\n txtdata += \"\\\"\\n\"\n print(txtdata)\n\n elif fmt == \"csharp\":\n txtdata = \"byte[] %s = new byte[%d] {\" % (var_name, len(data))\n for idx, val in enumerate(data):\n\n if idx % 16 == 0:\n txtdata += \"\\n\"\n\n txtdata += \"0x{0:02x},\".format(val)\n\n txtdata = txtdata.strip(\",\")\n txtdata += \" };\\n\"\n print(txtdata)\n\n elif fmt == \"python\":\n txtdata = \"%s = b\\\"\\\"\\n\" % var_name\n for idx, val in enumerate(data):\n\n if idx % 16 == 0:\n txtdata += \"%s += b\\\"\" % var_name\n\n txtdata += \"\\\\x{0:02x}\".format(val)\n\n txtdata = txtdata.strip(\",\")\n txtdata += \"\\\"\\n\"\n print(txtdata)\n\n else: # raw\n print_std(data)\n\n\nikey = int(args.key, 0)\nif ikey < 0:\n ikey = 0\n\nif ikey > 255:\n ikey = 255\n\nkey = (ikey).to_bytes(1, byteorder='big')[0]\n\ntext=args.text\n\nif text == \"-\":\n bdata = sys.stdin.buffer.read()\nelse:\n bdata = str.encode(text)\n\nprint_err(\"Encoding data with key 0x%02x\\n\" % key)\nprint_err(\"Input size: %d\\n\" % len(bdata))\n\nodata = bytearray()\n\nfor i in bdata:\n odata.append( i ^ key )\n\nprint_output(odata)\n", "sub_path": "python/leets.py", "file_name": "leets.py", "file_ext": "py", "file_size_in_byte": 2401, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.stderr.flush", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.stdout.buffer.write", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.stdin.buffer.read", "line_number": 92, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 92, "usage_type": "attribute"}]} {"seq_id": "323120165", "text": "from django import template\nfrom parsifal.reviews.models import Review\n\nregister = template.Library()\n\n@register.filter\ndef is_visitor(user, review):\n review = Review.objects.get(pk=review.id)\n if review.export_protocol and not user == review.author and not user in review.co_authors.all():\n return True\n\n for visitor in review.visitors.all():\n if user.id == visitor.id:\n return True\n return False\n", "sub_path": "parsifal/authentication/templatetags/is_visitor.py", "file_name": "is_visitor.py", "file_ext": "py", "file_size_in_byte": 435, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.template.Library", "line_number": 4, "usage_type": "call"}, {"api_name": "django.template", "line_number": 4, "usage_type": "name"}, {"api_name": "parsifal.reviews.models.Review.objects.get", "line_number": 8, "usage_type": "call"}, {"api_name": "parsifal.reviews.models.Review.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": "parsifal.reviews.models.Review", "line_number": 8, "usage_type": "name"}]} {"seq_id": "25446479", "text": "import datetime\nimport time\nimport feedparser\nimport vk_api\nimport logging\nimport os\n\nfrom blog.SETTING import VK_LOGIN, VK_PASSWORD, 
SHAPITO__SHOW_GROUT_ID\n\nlogging.basicConfig(format=u'%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s'\n , level=logging.DEBUG\n , filename=os.path.join(os.path.dirname(__file__), \"social_networks.log\")\n )\n\n\nclass PostHeadmarketGroup(object):\n\n def __init__(self):\n self.vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)\n if not self.vk_session.authorization():\n try:\n self.vk_session.authorization()\n except vk_api.AuthorizationError as error_msg:\n print(error_msg)\n return\n\n self.vk = self.vk_session.get_api()\n self.rss_feed = ''\n\n def post(self, message, attachments=''):\n post_id = self.vk.wall.post(owner_id=-SHAPITO__SHOW_GROUT_ID, from_group=1, message=message, attachments=attachments)\n return post_id\n\n def wall_create_comment(self, data):\n post_id = data['object']['post_id']\n object_id = data['object']['id']\n response_post_id = self.vk.wall.createComment(owner_id=-SHAPITO__SHOW_GROUT_ID, post_id=post_id, from_group=1, message='let me see...', reply_to_comment=object_id, guid=datetime.datetime.now())\n return response_post_id\n\n def edit(self, post_id, message, attachments):\n post_id = self.vk.wall.edit(owner_id=-SHAPITO__SHOW_GROUT_ID, post_id=post_id, message=message, attachments=attachments)\n return post_id\n\n def fetch_rss(self):\n if self.rss_feed == '':\n self.rss_feed = feedparser.parse('https://hightech.fm/feed.rss')\n return self.rss_feed\n\n def post_pic_edit(self):\n\n post_id = self.vk.wall.edit(owner_id=-SHAPITO__SHOW_GROUT_ID, post_id=72, from_group=1,\n message='post_pic_edit: %s\\n\\n' % datetime.datetime.now()\n )\n return post_id\n\n def notifications_get(self, count_default=30):\n start_time = int(time.mktime(time.strptime('2017-03-16 23:49:00', '%Y-%m-%d %H:%M:%S')))\n end_time = int(time.mktime(time.strptime('2017-03-16 23:59:00', '%Y-%m-%d %H:%M:%S')))\n try:\n notifications = self.vk.notifications.get(count=count_default, start_time=start_time, end_time=end_time)\n except Exception as e:\n logging.debug(e)\n return\n return notifications\n\n def get_film(self, data):\n text = data['object']['text']\n film_name = text.replace('kino:', '')\n try:\n list_films = self.vk.video.search(q=film_name, sort=1, hd=1, filters='long', count=5)\n except Exception as e:\n logging.debug(e)\n return\n\n list_photo_320 = [i['photo_320'] for i in list_films['items']]\n post_id = data['object']['post_id']\n object_id = data['object']['id']\n try:\n response_post_id = self.vk.wall.createComment(\n owner_id=-SHAPITO__SHOW_GROUT_ID,\n post_id=post_id,\n from_group=1,\n message='ะะฐะนะดะตะฝะพ:\\n' + list_photo_320[0],\n reply_to_comment=object_id,\n guid=datetime.datetime.now()\n ) \n except Exception as e:\n logging.debug(e)\n return\n\n return response_post_id\n", "sub_path": "social_networks/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 3461, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "vk_api.VkApi", "line_number": 19, "usage_type": "call"}, {"api_name": "blog.SETTING.VK_LOGIN", "line_number": 19, "usage_type": "argument"}, {"api_name": 
"blog.SETTING.VK_PASSWORD", "line_number": 19, "usage_type": "argument"}, {"api_name": "vk_api.AuthorizationError", "line_number": 23, "usage_type": "attribute"}, {"api_name": "blog.SETTING.SHAPITO__SHOW_GROUT_ID", "line_number": 31, "usage_type": "name"}, {"api_name": "blog.SETTING.SHAPITO__SHOW_GROUT_ID", "line_number": 37, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "attribute"}, {"api_name": "blog.SETTING.SHAPITO__SHOW_GROUT_ID", "line_number": 41, "usage_type": "name"}, {"api_name": "feedparser.parse", "line_number": 46, "usage_type": "call"}, {"api_name": "blog.SETTING.SHAPITO__SHOW_GROUT_ID", "line_number": 51, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "attribute"}, {"api_name": "time.mktime", "line_number": 57, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 57, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 58, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 58, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 62, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 72, "usage_type": "call"}, {"api_name": "blog.SETTING.SHAPITO__SHOW_GROUT_ID", "line_number": 80, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 85, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 85, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 88, "usage_type": "call"}]} {"seq_id": "17819282", "text": "from dataclasses import dataclass\nfrom typing import Callable, Dict, List, Tuple, Union\nfrom raytkUtil import RaytkContext\n\n# noinspection PyUnreachableCode\nif False:\n\t# noinspection PyUnresolvedReferences\n\tfrom _stubs import *\n\n\tclass _ConfigPar(ParCollection):\n\t\tParammode: 'Union[str, Par]'\n\t\tInlineparameteraliases: 'Union[bool, Par]'\n\t\tInlinereadonlyparameters: 'Union[bool, Par]'\n\t\tSimplifynames: 'Union[bool, Par]'\n\t\tGeneratetypedefs: 'Union[bool, Par]'\n\t\tIncludemode: 'Union[str, Par]'\n\n\tclass _OwnerCompPar(_ConfigPar):\n\t\tGlobalprefix: 'Union[DAT, str, Par]'\n\t\tPredeclarations: 'Union[DAT, str, Par]'\n\t\tTextureindexoffset: 'Union[int, Par]'\n\t\tGlobalmacrotable: 'Union[DAT, str, Par]'\n\t\tLibraries: 'Union[str, Par]'\n\t\tBodytemplate: 'Union[DAT, str, Par]'\n\t\tOutputbuffertable: 'Union[DAT, str, Par]'\n\t\tSupportmaterials: 'Union[bool, Par]'\n\t\tShaderbuilderconfig: 'Union[COMP, str, Par]'\n\n\tclass _OwnerComp(COMP):\n\t\tpar: '_OwnerCompPar'\n\n\tclass _ConfigComp(COMP):\n\t\tpar: '_ConfigPar'\n\nclass ShaderBuilder:\n\tdef __init__(self, ownerComp: '_OwnerComp'):\n\t\tself.ownerComp = ownerComp\n\n\tdef configPar(self) -> '_ConfigPar':\n\t\tp = self.ownerComp.par['Shaderbuilderconfig']\n\t\tif p:\n\t\t\to = op(p)\n\t\t\tif o:\n\t\t\t\t# noinspection PyTypeChecker\n\t\t\t\treturn o.par\n\t\treturn self.ownerComp.par\n\n\tdef preprocessDefinitions(self, dat: 'scriptDAT'):\n\t\t# BEFORE definitions are reversed, so a def's inputs are always BELOW it in the table\n\t\tpass\n\n\tdef definitionTable(self) -> 'DAT':\n\t\t# in reverse order (aka declaration order)\n\t\treturn self.ownerComp.op('definitions')\n\n\tdef parameterDetailTable(self) -> 'DAT':\n\t\treturn self.ownerComp.op('param_details')\n\n\tdef outputBufferTable(self) -> 'DAT':\n\t\treturn 
self.ownerComp.op('output_buffer_table')\n\n\tdef allParamVals(self) -> 'CHOP':\n\t\treturn self.ownerComp.op('all_param_vals')\n\n\tdef buildGlobalPrefix(self):\n\t\treturn wrapCodeSection(self.ownerComp.par.Globalprefix.eval(), 'globalPrefix')\n\n\tdef _createParamProcessor(self) -> '_ParameterProcessor':\n\t\tmode = self.configPar().Parammode.eval()\n\t\tif mode == 'uniformarray':\n\t\t\treturn _VectorArrayParameterProcessor(\n\t\t\t\tself.parameterDetailTable(),\n\t\t\t\tself.allParamVals(),\n\t\t\t\tself.configPar(),\n\t\t\t)\n\t\telse:\n\t\t\traise NotImplementedError(f'Parameter processor not available for mode: {mode!r}')\n\n\tdef buildGlobalDeclarations(self):\n\t\tdefsTable = self.definitionTable()\n\t\tif defsTable.numRows < 2:\n\t\t\tcode = ['#error No input definition']\n\t\telse:\n\t\t\tmainName = defsTable[defsTable.numRows - 1, 'name']\n\t\t\tparamProcessor = self._createParamProcessor()\n\t\t\tcode = paramProcessor.globalDeclarations()\n\t\t\tcode += [\n\t\t\t\tf'#define thismap {mainName}'\n\t\t\t]\n\t\treturn wrapCodeSection(code, 'globals')\n\n\tdef getOpsFromDefinitionColumn(self, column: str):\n\t\tdefsTable = self.definitionTable()\n\t\tif defsTable.numRows < 2 or not defsTable[0, column]:\n\t\t\treturn []\n\t\tresults = []\n\t\tfor cell in defsTable.col(column)[1:]:\n\t\t\tif not cell.val.strip():\n\t\t\t\tcontinue\n\t\t\tpaths = cell.val.strip().split(' ')\n\t\t\tfor path in paths:\n\t\t\t\to = op(path)\n\t\t\t\tif o:\n\t\t\t\t\tresults.append(o)\n\t\treturn results\n\n\tdef _getMacros(self) -> 'List[Tuple[str, str]]':\n\t\ttables = [self.ownerComp.par.Globalmacrotable.eval()]\n\t\ttables += self.getOpsFromDefinitionColumn('macroTable')\n\t\tnamesAndVals = []\n\t\tfor table in tables:\n\t\t\tif not table:\n\t\t\t\tcontinue\n\t\t\tfor row in range(table.numRows):\n\t\t\t\tif table.numCols == 3:\n\t\t\t\t\tif table[row, 0].val in ('0', 'False'):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tname = table[row, 1].val\n\t\t\t\t\tvalue = table[row, 2].val\n\t\t\t\telse:\n\t\t\t\t\tname = table[row, 0].val\n\t\t\t\t\tif table.numCols > 1:\n\t\t\t\t\t\tvalue = table[row, 1].val\n\t\t\t\t\telse:\n\t\t\t\t\t\tvalue = ''\n\t\t\t\tif value:\n\t\t\t\t\tvalue = ' ' + value\n\t\t\t\tif not name.strip():\n\t\t\t\t\tcontinue\n\t\t\t\tnamesAndVals.append((name, value))\n\t\toutputBuffers = self.outputBufferTable()\n\t\tif outputBuffers.numRows > 1 and outputBuffers.col('macro'):\n\t\t\tfor cell in outputBuffers.col('macro')[1:]:\n\t\t\t\tif cell.val:\n\t\t\t\t\tnamesAndVals.append((cell.val, ''))\n\t\treturn namesAndVals\n\n\tdef buildMacroTable(self, dat: 'DAT'):\n\t\tdat.clear()\n\t\tdat.appendRows([\n\t\t\t[name, value]\n\t\t\tfor name, value in self._getMacros()\n\t\t])\n\n\tdef buildMacroBlock(self):\n\t\tdecls = []\n\t\tfor name, value in self._getMacros():\n\t\t\tif name.startswith('#define'):\n\t\t\t\tdecls.append(name + value)\n\t\t\telse:\n\t\t\t\tdecls.append(f'#define {name} {value}')\n\t\tdecls = _uniqueList(decls)\n\t\tcode = wrapCodeSection(decls, 'macros')\n\t\t# if self.configPar().Inlineparameteraliases:\n\t\t# \tprocessor = self._createParamProcessor()\n\t\t# \treturn processor.processCodeBlock(code)\n\t\treturn code\n\n\tdef getLibraryDats(self, onWarning: Callable[[str], None] = None) -> 'List[DAT]':\n\t\trequiredLibNames = self.ownerComp.par.Librarynames.eval().strip().split(' ') # type: List[str]\n\t\trequiredLibNames = [n for n in requiredLibNames if n]\n\t\tdefsTable = self.definitionTable()\n\t\tif defsTable[0, 'libraryNames']:\n\t\t\tfor cell in 
defsTable.col('libraryNames')[1:]:\n\t\t\t\tif not cell.val:\n\t\t\t\t\tcontinue\n\t\t\t\tfor name in cell.val.split(' '):\n\t\t\t\t\tif name not in requiredLibNames:\n\t\t\t\t\t\trequiredLibNames.append(name)\n\t\tlibraryOps = self.ownerComp.par.Libraries.evalOPs()\n\t\tdats = [] # type: List[DAT]\n\t\tfor libraryOp in libraryOps:\n\t\t\tif libraryOp.isDAT:\n\t\t\t\tif libraryOp not in dats:\n\t\t\t\t\tdats.append(libraryOp)\n\t\t\telif libraryOp.isCOMP:\n\t\t\t\tnamesToRemove = []\n\t\t\t\tfor name in requiredLibNames:\n\t\t\t\t\tdat = libraryOp.op(name)\n\t\t\t\t\tif not dat:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tdats.append(dat)\n\t\t\t\t\tnamesToRemove.append(name)\n\t\t\t\tfor name in namesToRemove:\n\t\t\t\t\trequiredLibNames.remove(name)\n\t\tif requiredLibNames and onWarning:\n\t\t\tonWarning('Missing libraries: ' + repr(requiredLibNames))\n\t\treturn dats\n\n\tdef buildLibraryIncludes(self, onWarning: Callable[[str], None] = None):\n\t\tmode = str(self.configPar()['Includemode'] or 'includelibs')\n\t\tsupportsInclude = self.ownerComp.op('support_table')['include', 1] == '1'\n\t\tif mode == 'includelibs' and not supportsInclude:\n\t\t\tinlineAll = True\n\t\telse:\n\t\t\tinlineAll = mode == 'inlineall'\n\t\tlibraries = self.getLibraryDats(onWarning)\n\t\tif inlineAll:\n\t\t\tlibBlocks = [\n\t\t\t\tf'// Library: <{lib.path}>\\n{lib.text}'\n\t\t\t\tfor lib in libraries\n\t\t\t]\n\t\telse:\n\t\t\tlibBlocks = [\n\t\t\t\tf'#include <{lib.path}>'\n\t\t\t\tfor lib in libraries\n\t\t\t]\n\t\treturn wrapCodeSection(libBlocks, 'libraries')\n\n\tdef buildOpDataTypedefBlock(self):\n\t\tdefsTable = self.definitionTable()\n\t\ttypedefs = []\n\t\tmacros = []\n\t\tcoordTypeAdaptFuncs = {\n\t\t\t'float': 'adaptAsFloat',\n\t\t\t'vec2': 'adaptAsVec2',\n\t\t\t'vec3': 'adaptAsVec3',\n\t\t}\n\t\treturnTypeAdaptFuncs = {\n\t\t\t'float': 'adaptAsFloat',\n\t\t\t'vec4': 'adaptAsVec4',\n\t\t}\n\t\tfor row in range(1, defsTable.numRows):\n\t\t\tname = str(defsTable[row, 'name'])\n\t\t\tcoordType = str(defsTable[row, 'coordType'])\n\t\t\tcontextType = str(defsTable[row, 'contextType'])\n\t\t\treturnType = str(defsTable[row, 'returnType'])\n\t\t\ttypedefs += [\n\t\t\t\tf'#define {name}_CoordT {coordType}',\n\t\t\t\tf'#define {name}_ContextT {contextType}',\n\t\t\t\tf'#define {name}_ReturnT {returnType}',\n\t\t\t]\n\t\t\tmacros += [\n\t\t\t\tf'#define {name}_COORD_TYPE_{coordType}',\n\t\t\t\tf'#define {name}_CONTEXT_TYPE_{contextType}',\n\t\t\t\tf'#define {name}_RETURN_TYPE_{returnType}',\n\t\t\t\tf'#define {name}_asCoordT {coordTypeAdaptFuncs[coordType]}',\n\t\t\t]\n\t\t\tif returnType in returnTypeAdaptFuncs:\n\t\t\t\tmacros.append(f'#define {name}_asReturnT {returnTypeAdaptFuncs[returnType]}')\n\t\tif typedefs:\n\t\t\tlines = typedefs + [''] + macros\n\t\telse:\n\t\t\tlines = []\n\t\treturn wrapCodeSection(lines, 'opDataTypedefs')\n\n\tdef buildPredeclarations(self):\n\t\treturn wrapCodeSection(self.ownerComp.par.Predeclarations.eval(), 'predeclarations')\n\n\tdef _buildParameterExprs(self) -> 'List[Tuple[str, Union[str, float]]]':\n\t\tparamDetails = self.parameterDetailTable()\n\t\tif paramDetails.numRows < 2:\n\t\t\treturn []\n\t\tsuffixes = 'xyzw'\n\t\tpartAliases = [] # type: List[Tuple[str, Union[str, float]]]\n\t\tmainAliases = [] # type: List[Tuple[str, Union[str, float]]]\n\t\tinlineReadOnly = bool(self.configPar()['Inlinereadonlyparameters'])\n\t\tparamVals = self.allParamVals()\n\t\tparamTuplets = _ParamTupletSpec.fromTableRows(paramDetails)\n\t\tfor i, paramTuplet in 
enumerate(paramTuplets):\n\t\t\tshouldInline = inlineReadOnly and paramTuplet.isReadOnly and paramTuplet.isPresentInChop(paramVals)\n\t\t\tsize = len(paramTuplet.parts)\n\t\t\tif size == 1:\n\t\t\t\tif shouldInline:\n\t\t\t\t\tmainAliases.append((paramTuplet.parts[0], float(paramVals[paramTuplet.parts[0]])))\n\t\t\t\telse:\n\t\t\t\t\tmainAliases.append((paramTuplet.parts[0], f'vecParams[{i}].x'))\n\t\t\telse:\n\t\t\t\tif shouldInline:\n\t\t\t\t\tpartVals = [float(paramVals[part]) for part in paramTuplet.parts]\n\t\t\t\t\tvalsExpr = ','.join(str(v) for v in partVals)\n\t\t\t\t\tmainAliases.append((paramTuplet.tuplet, f'vec{size}({valsExpr})'))\n\t\t\t\t\tfor partI, partVal in enumerate(partVals):\n\t\t\t\t\t\tpartAliases.append((paramTuplet.parts[partI], partVal))\n\t\t\t\telse:\n\t\t\t\t\tif size == 4:\n\t\t\t\t\t\tmainAliases.append((paramTuplet.tuplet, f'vecParams[{i}]'))\n\t\t\t\t\telse:\n\t\t\t\t\t\tmainAliases.append((paramTuplet.tuplet, f'vec{size}(vecParams[{i}].{suffixes[:size]})'))\n\t\t\t\t\tfor partI, partName in enumerate(paramTuplet.parts):\n\t\t\t\t\t\tpartAliases.append((partName, f'vecParams[{i}].{suffixes[partI]}'))\n\t\treturn partAliases + mainAliases\n\n\tdef buildParameterAliases(self):\n\t\tparamProcessor = self._createParamProcessor()\n\t\tdecls = paramProcessor.paramAliases()\n\t\treturn wrapCodeSection(decls, 'paramAliases')\n\n\tdef processParametersInCode(self, code: str):\n\t\tparamProcessor = self._createParamProcessor()\n\t\treturn paramProcessor.processCodeBlock(code)\n\n\tdef buildTextureDeclarations(self):\n\t\ttextureTable = self.ownerComp.op('texture_table')\n\t\toffset = int(self.ownerComp.par.Textureindexoffset)\n\t\tindexByType = {\n\t\t\t'2d': offset,\n\t\t\t'3d': 0,\n\t\t\t'cube': 0,\n\t\t}\n\t\tarrayByType = {\n\t\t\t'2d': 'sTD2DInputs',\n\t\t\t'3d': 'sTD3DInputs',\n\t\t\t'cube': 'sTDCubeInputs',\n\t\t}\n\t\tdecls = []\n\t\tfor i in range(1, textureTable.numRows):\n\t\t\tname = str(textureTable[i, 'name'])\n\t\t\ttexType = str(textureTable[i, 'type'] or '2d')\n\t\t\tif texType not in indexByType:\n\t\t\t\traise Exception(f'Invalid texture type for {name}: {texType!r}')\n\t\t\tindex = indexByType[texType]\n\t\t\tdecls.append(f'#define {name} {arrayByType[texType]}[{index}]')\n\t\t\tindexByType[texType] = index + 1\n\t\treturn wrapCodeSection(decls, 'textures')\n\n\tdef buildBufferDeclarations(self):\n\t\tbufferTable = self.ownerComp.op('buffer_table')\n\t\tdecls = []\n\t\tfor i in range(1, bufferTable.numRows):\n\t\t\tname = bufferTable[i, 'name']\n\t\t\tdataType = bufferTable[i, 'type']\n\t\t\tuniType = bufferTable[i, 'uniformType']\n\t\t\tn = int(bufferTable[i, 'length'])\n\t\t\tif uniType == 'uniformarray':\n\t\t\t\tdecls.append(f'uniform {dataType} {name}[{n}];')\n\t\t\telif uniType == 'texturebuffer':\n\t\t\t\tdecls.append(f'uniform samplerBuffer {name};')\n\t\treturn wrapCodeSection(decls, 'buffers')\n\n\tdef buildMaterialDeclarations(self):\n\t\tif not self.ownerComp.par.Supportmaterials:\n\t\t\treturn ' '\n\t\tmaterialTable = self.ownerComp.op('material_table')\n\t\tif materialTable.numRows < 2:\n\t\t\treturn ' '\n\t\ti = 1001\n\t\tdecls = []\n\t\tfor name in materialTable.col('material')[1:]:\n\t\t\tdecls.append(f'#define {name} {i}')\n\t\t\ti += 1\n\t\treturn wrapCodeSection(decls, 'materials')\n\n\tdef buildOutputBufferDeclarations(self):\n\t\toutputBuffers = self.outputBufferTable()\n\t\tif outputBuffers.numRows < 2:\n\t\t\treturn ' '\n\t\tdecls = [\n\t\t\tf'layout(location = {cell.row - 1}) out vec4 {cell.val};'\n\t\t\tfor cell in 
outputBuffers.col('name')[1:]\n\t\t]\n\t\treturn wrapCodeSection(decls, 'outputBuffers')\n\n\tdef buildOutputInitBlock(self):\n\t\toutputBuffers = self.outputBufferTable()\n\t\treturn wrapCodeSection(\n\t\t\t[\n\t\t\t\t'void initOutputs() {'\n\t\t\t] +\n\t\t\t[\n\t\t\t\tf'{cell.val} = vec4(0.);'\n\t\t\t\tfor cell in outputBuffers.col('name')[1:]\n\t\t\t] + [\n\t\t\t\t'}'\n\t\t\t],\n\t\t\t'outputInit',\n\t\t)\n\n\tdef buildOpGlobalsBlock(self):\n\t\tdats = self.getOpsFromDefinitionColumn('opGlobalsPath')\n\t\treturn wrapCodeSection(dats, 'opGlobals')\n\n\tdef buildInitBlock(self):\n\t\tdats = self.getOpsFromDefinitionColumn('initPath')\n\t\tcode = _combineCode(dats)\n\t\tif not code.strip():\n\t\t\treturn ' '\n\t\treturn wrapCodeSection([\n\t\t\t'#define RAYTK_HAS_INIT',\n\t\t\t'void init() {',\n\t\t\tcode,\n\t\t\t'}',\n\t\t], 'init')\n\n\tdef buildFunctionsBlock(self):\n\t\tdats = self.getOpsFromDefinitionColumn('functionPath')\n\t\treturn wrapCodeSection(dats, 'functions')\n\n\tdef buildBodyBlock(self, materialTable: 'DAT'):\n\t\tbodyDat = self.ownerComp.par.Bodytemplate.eval()\n\t\tcode = bodyDat.text if bodyDat else ''\n\t\tif not code:\n\t\t\treturn ' '\n\t\tif _materialParagraphPlaceholder in code:\n\t\t\tmaterialBlock = self._buildMaterialBlock(materialTable)\n\t\t\tcode = code.replace(_materialParagraphPlaceholder, materialBlock, 1)\n\t\treturn wrapCodeSection(code, 'body')\n\n\t@staticmethod\n\tdef _buildMaterialBlock(materialTable: 'DAT'):\n\t\tif materialTable.numRows < 2:\n\t\t\treturn ''\n\t\toutput = ''\n\t\tfor nameCell, pathCell in materialTable.rows()[1:]:\n\t\t\tif not nameCell:\n\t\t\t\tcontinue\n\t\t\tcodeDat = op(pathCell)\n\t\t\tmaterialCode = codeDat.text if codeDat else ''\n\t\t\toutput += f'else if(m == {nameCell.val}) {{\\n'\n\t\t\toutput += materialCode + '\\n}'\n\t\treturn output\n\n\tdef buildValidationErrors(self, dat: 'DAT'):\n\t\tdat.clear()\n\t\tif RaytkContext().develMode():\n\t\t\treturn\n\t\ttoolkitVersions = {} # type: Dict[str, int]\n\t\tdefsTable = self.definitionTable()\n\t\tif defsTable.numRows < 2 or not defsTable[0, 'toolkitVersion']:\n\t\t\treturn\n\t\tfor i in range(1, defsTable.numRows):\n\t\t\tversion = str(defsTable[i, 'toolkitVersion'] or '')\n\t\t\tif version != '':\n\t\t\t\ttoolkitVersions[version] = 1 + toolkitVersions.get(version, 0)\n\t\tif len(toolkitVersions) > 1:\n\t\t\terror = f'Toolkit version mismatch ({\", \".join(list(toolkitVersions.keys()))})'\n\t\t\tdat.appendRow(['path', 'level', 'message'])\n\t\t\tdat.appendRow([parent().path, 'warning', error])\n\n_materialParagraphPlaceholder = '// #include '\n\n@dataclass\nclass _ParamTupletSpec:\n\ttuplet: str\n\tparts: Tuple[str]\n\tisReadOnly: bool\n\n\tdef isPresentInChop(self, chop: 'CHOP'):\n\t\treturn any([chop[part] is not None for part in self.parts])\n\n\t@classmethod\n\tdef fromRow(cls, dat: 'DAT', row: int):\n\t\tparts = []\n\t\tfor i in range(1, 5):\n\t\t\tcell = dat[row, 'part' + str(i)]\n\t\t\tif not cell.val:\n\t\t\t\tbreak\n\t\t\tparts.append(cell.val)\n\t\treturn cls(\n\t\t\ttuplet=str(dat[row, 'tuplet']),\n\t\t\tparts=tuple(parts),\n\t\t\tisReadOnly='readOnly' in str(dat[row, 'status'] or ''),\n\t\t)\n\n\t@classmethod\n\tdef fromTableRows(cls, dat: 'DAT') -> 'List[_ParamTupletSpec]':\n\t\tif not dat or dat.numRows < 2:\n\t\t\treturn []\n\t\treturn [\n\t\t\tcls.fromRow(dat, row)\n\t\t\tfor row in range(1, dat.numRows)\n\t\t]\n\n@dataclass\nclass _ParamExpr:\n\tname: str\n\texpr: Union[str, float]\n\ttype: str\n\n@dataclass\nclass _UniformSpec:\n\tpass\n\nclass 
_ParameterProcessor:\n\tdef __init__(\n\t\t\tself,\n\t\t\tparamDetailTable: 'DAT',\n\t\t\tparamVals: 'CHOP',\n\t\t\tconfigPar: '_ConfigPar',\n\t):\n\t\tself.paramDetailTable = paramDetailTable\n\t\tself.hasParams = paramDetailTable.numRows > 1\n\t\tself.useConstantReadOnly = configPar.Inlinereadonlyparameters\n\t\tself.inlineAliases = configPar.Inlineparameteraliases\n\t\tself.paramVals = paramVals\n\t\tself.aliasMode = str(configPar['Paramaliasmode'] or 'macro')\n\n\tdef globalDeclarations(self) -> List[str]:\n\t\traise NotImplementedError()\n\n\tdef _generateParamExprs(self) -> List[_ParamExpr]:\n\t\traise NotImplementedError()\n\n\tdef paramAliases(self) -> List[str]:\n\t\traise NotImplementedError()\n\n\tdef processCodeBlock(self, code: str) -> str:\n\t\traise NotImplementedError()\n\nclass _VectorArrayParameterProcessor(_ParameterProcessor):\n\tdef globalDeclarations(self) -> List[str]:\n\t\tparamCount = max(1, self.paramDetailTable.numRows - 1)\n\t\treturn [\n\t\t\tf'uniform vec4 vecParams[{paramCount}];',\n\t\t]\n\n\tdef _generateParamExprs(self) -> List[_ParamExpr]:\n\t\tparamExprs = [] # type: List[_ParamExpr]\n\t\tsuffixes = 'xyzw'\n\t\tparamTuplets = _ParamTupletSpec.fromTableRows(self.paramDetailTable)\n\t\tfor i, paramTuplet in enumerate(paramTuplets):\n\t\t\tuseConstant = self.useConstantReadOnly and paramTuplet.isReadOnly and paramTuplet.isPresentInChop(self.paramVals)\n\t\t\tsize = len(paramTuplet.parts)\n\t\t\tif size == 1:\n\t\t\t\tname = paramTuplet.parts[0]\n\t\t\t\tparamExprs.append(_ParamExpr(\n\t\t\t\t\tname,\n\t\t\t\t\trepr(float(self.paramVals[name])) if useConstant else f'vecParams[{i}].x',\n\t\t\t\t\t'float'\n\t\t\t\t))\n\t\t\telse:\n\t\t\t\tif useConstant:\n\t\t\t\t\tpartVals = [float(self.paramVals[part]) for part in paramTuplet.parts]\n\t\t\t\t\tvalsExpr = ','.join(str(v) for v in partVals)\n\t\t\t\t\tparType = f'vec{size}'\n\t\t\t\t\tparamExprs.append(_ParamExpr(paramTuplet.tuplet, f'{parType}({valsExpr})', parType))\n\t\t\t\t\tfor partI, partVal in enumerate(partVals):\n\t\t\t\t\t\tparamExprs.append(_ParamExpr(paramTuplet.parts[partI], partVal, 'float'))\n\t\t\t\telse:\n\t\t\t\t\tif size == 4:\n\t\t\t\t\t\tparamExprs.append(_ParamExpr(paramTuplet.tuplet, f'vecParams[{i}]', 'vec4'))\n\t\t\t\t\telse:\n\t\t\t\t\t\tparType = f'vec{size}'\n\t\t\t\t\t\tparamExprs.append(_ParamExpr(\n\t\t\t\t\t\t\tparamTuplet.tuplet,\n\t\t\t\t\t\t\tf'{parType}(vecParams[{i}].{suffixes[:size]})',\n\t\t\t\t\t\t\tparType\n\t\t\t\t\t\t))\n\t\t\t\t\tfor partI, partName in enumerate(paramTuplet.parts):\n\t\t\t\t\t\tparamExprs.append(_ParamExpr(partName, f'vecParams[{i}].{suffixes[partI]}', 'float'))\n\t\treturn paramExprs\n\n\tdef paramAliases(self) -> List[str]:\n\t\tif not self.hasParams:\n\t\t\treturn []\n\t\t# if self.inlineAliases:\n\t\t# \treturn []\n\t\tif self.aliasMode == 'globalvar':\n\t\t\treturn [\n\t\t\t\tf'{paramExpr.type} {paramExpr.name} = {paramExpr.expr};'\n\t\t\t\tfor paramExpr in self._generateParamExprs()\n\t\t\t]\n\t\telse:\n\t\t\treturn [\n\t\t\t\tf'#define {paramExpr.name} {paramExpr.expr}'\n\t\t\t\tfor paramExpr in self._generateParamExprs()\n\t\t\t]\n\n\tdef processCodeBlock(self, code: str) -> str:\n\t\tif not self.inlineAliases or not code:\n\t\t\treturn code\n\t\tfor paramExpr in self._generateParamExprs():\n\t\t\tcode = code.replace(paramExpr.name, paramExpr.expr)\n\t\treturn code\n\ndef _stringify(val: 'Union[str, DAT]'):\n\tif val is None:\n\t\treturn ''\n\tif isinstance(val, DAT):\n\t\treturn val.text\n\treturn str(val)\n\ndef _combineCode(code: 
'Union[str, DAT, List[Union[str, DAT]]]'):\n\tif isinstance(code, list):\n\t\tcombined = ''\n\t\tfor item in code:\n\t\t\tval = _stringify(item)\n\t\t\tif val:\n\t\t\t\tcombined += val + '\\n'\n\t\treturn combined\n\telse:\n\t\treturn _stringify(code)\n\ndef wrapCodeSection(code: 'Union[str, DAT, List[Union[str, DAT]]]', name: str):\n\tcode = _combineCode(code)\n\tif not code:\n\t\t# return a non-empty string in order to force DATs to be text when using dat.write()\n\t\treturn ' '\n\treturn f'///----BEGIN {name}\\n{code}\\n///----END {name}'\n\ndef updateLibraryMenuPar(libsComp: 'COMP'):\n\tp = parent().par.Librarynames # type: Par\n\tlibs = libsComp.findChildren(type=DAT, maxDepth=1, tags=['library'])\n\tlibs.sort(key=lambda l: -l.nodeY)\n\tp.menuNames = [lib.name for lib in libs]\n\ndef _uniqueList(items: list):\n\tresults = []\n\tfor item in items:\n\t\tif item not in results:\n\t\t\tresults.append(item)\n\treturn results\n", "sub_path": "src/components/shaderBuilder/shaderBuilder.py", "file_name": "shaderBuilder.py", "file_ext": "py", "file_size_in_byte": 18461, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "typing.Callable", "line_number": 159, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 190, "usage_type": "name"}, {"api_name": "raytkUtil.RaytkContext", "line_number": 415, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 435, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 432, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 467, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 464, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 470, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 488, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 491, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 494, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 501, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 507, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 543, "usage_type": "name"}]} {"seq_id": "22152820", "text": "from wtforms.validators import ValidationError\n\n\ndef validate(data, rules=None):\n if rules is None:\n assert type(data) is dict\n\n error_messages = None\n for name, field in data.items():\n validation = validate(field['data'], field['rules'])\n if not validation.validated:\n if error_messages is None:\n error_messages = {name: validation.error_message}\n else:\n error_messages[name] = validation.error_message\n\n return error_messages\n\n return Validator(data, rules)\n\n\nclass Validator(object):\n\n validated = False\n error_message = ''\n\n def __init__(self, data, rules):\n self.data = data\n self.rules = rules\n\n try:\n for rule in iter(self.rules):\n rule(self.data)\n except ValidationError as e:\n self.error_message = str(e)\n else:\n self.validated = True\n\n\nclass Rules(object):\n \"\"\"้ชŒ่ฏ่ง„ๅˆ™\"\"\"\n class Required(object):\n def __init__(self, message='', data=None):\n self.message = message\n self.data = data\n\n def __call__(self, data):\n if self.data is None:\n rule = data is not None and data != str() and data != list()\n else:\n if type(data) is list:\n rule = set(self.data) - set(data) != set(self.data)\n else:\n rule = data in self.data\n\n if not rule:\n raise ValidationError(self.message)\n\n class Unique(object):\n def 
__init__(self, model, field, message='่ฏฅๅ†…ๅฎนๅทฒๅญ˜ๅœจ'):\n self.model = model\n self.field = field\n self.message = message\n\n def __call__(self, form, field):\n check = self.model.query.filter(self.field == field.data).first()\n if check:\n raise ValidationError(self.message)\n", "sub_path": "app/util/validator.py", "file_name": "validator.py", "file_ext": "py", "file_size_in_byte": 1959, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "wtforms.validators.ValidationError", "line_number": 34, "usage_type": "name"}, {"api_name": "wtforms.validators.ValidationError", "line_number": 57, "usage_type": "call"}, {"api_name": "wtforms.validators.ValidationError", "line_number": 68, "usage_type": "call"}]} {"seq_id": "145318850", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 22 12:18:00 2021\n\n@author: Usuario\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimg = cv2.imread('C:\\\\Users\\\\Usuario\\\\Documents\\\\UNS\\\\Beca EVC CIN\\\\laguna.png')\n\n\ngrid_RGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\nplt.figure(figsize=(20,8))\nplt.imshow(grid_RGB) # Printing the original picture after converting to RGB\n\n\ngrid_HSV = cv2.cvtColor(grid_RGB, cv2.COLOR_RGB2HSV) # Converting to HSV\n\nlower_green = np.array([25,52,72])\nupper_green = np.array([102,255,255])\n\nmask= cv2.inRange(grid_HSV, lower_green, upper_green)\nres = cv2.bitwise_and(img, img, mask=mask) # Generating image with the green part\n\nprint(\"Green Part of Image\")\n\n#lower_blue = np.array([153, 153, 255])\n#upper_blue = np.array([0,0,140])\n\n#mask= cv2.inRange(grid_HSV, lower_blue, upper_blue)\n#res = cv2.bitwise_and(img, img, mask=mask) # Generating image with the green part\n\n#print(\"blue Part of Image\")\nplt.figure(figsize=(20,8))\nplt.imshow(res)\n\nblue_perc = (mask>0).mean()\nprint(blue_perc,\"%\")", "sub_path": "porcentaje.py", "file_name": "porcentaje.py", "file_ext": "py", "file_size_in_byte": 1035, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "cv2.imread", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 15, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2HSV", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}]} {"seq_id": "145437451", "text": "import re\nfrom collections import defaultdict\n\nrecords = []\n\nwith open('input.txt', 
'r') as f:\n records = list(map(\n lambda x: re.findall(r'\\w+', x)[:7],\n f.read().splitlines())\n )\n\n def record_key(item):\n year, month, day, hour, minute, fi, se = item\n return '{0}-{1}-{2}-{3}-{4}'.format(year, month, day, hour, minute)\n\n records.sort(key=record_key)\n\nguards = defaultdict(lambda: [0]*60)\n\n\ndef init_guards():\n global guards\n\n guard = -1\n g_asleep = -1\n g_up = -1\n\n for year, month, day, hour, minute, f_arg, s_arg in records:\n if f_arg == 'Guard':\n guard = s_arg\n g_asleep = -1\n g_up = -1\n elif f_arg == 'falls':\n g_asleep = int(minute)\n g_up = -1\n elif f_arg == 'wakes':\n g_up = int(minute)\n\n if g_asleep != -1 and g_up != -1:\n for i in range(g_asleep, g_up):\n guards[guard][i] += 1\n\n\ndef solve():\n global guards\n most_minutes_asleep = 0\n most_frequently_asleep = 0\n\n first_part = 0\n second_part = 0\n\n for guard, minutes in guards.items():\n total_minutes_asleep = sum(minutes)\n freq_minute_asleep = max(minutes)\n\n if total_minutes_asleep > most_minutes_asleep:\n most_minutes_asleep = total_minutes_asleep\n first_part = int(guard) * minutes.index(freq_minute_asleep)\n\n if freq_minute_asleep > most_frequently_asleep:\n most_frequently_asleep = freq_minute_asleep\n second_part = int(guard) * minutes.index(freq_minute_asleep)\n\n return first_part, second_part\n\n\ninit_guards()\nfirst_part, second_part = solve()\nprint('First part: {0}'.format(first_part))\nprint('Second part: {0}'.format(second_part))\n", "sub_path": "day_4/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1767, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "re.findall", "line_number": 8, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 18, "usage_type": "call"}]} {"seq_id": "383299969", "text": "from socket import *\nimport json\nimport sys\nfrom time import sleep\n\nusage = 'Usage:\\npython GameClient.py [host] [port]'\n\nclientSocket = None\nmovecount = ''\n\n\ndef communicate(message):\n global clientSocket\n clientSocket.send(message.encode())\n resp = clientSocket.recv(1024).decode()\n return json.loads(resp)\n\n\ndef update():\n global movecount\n # Continue to send for opponent's move or game end\n while (1):\n sleep(1)\n update_json = communicate('update ' + movecount)\n content = update_json['content']\n # Check if board has changed\n if (update_json['status'] == '200 OK'):\n print(content[2:])\n movecount = content[0:1]\n return\n\n\ndef place(command):\n # Send place request\n resp_json = communicate(command[0] + ' ' + command[1])\n print(resp_json['content'][2:])\n if (resp_json['status'] == '400 ERROR'):\n return\n global movecount\n movecount = resp_json['content'][0:1]\n if (movecount == '!'):\n return\n # Check for updates\n update()\n\n\ndef login(command):\n resp_json = communicate(command[0] + ' ' + command[1])\n print(resp_json['content'])\n if (resp_json['content'][0:1] == 'G'):\n return\n\n while (1):\n sleep(1)\n meet_json = communicate('meet')\n # Check if additional player logged in\n if (meet_json['status'] == '200 OK'):\n print(meet_json['content'])\n update()\n return\n\n\ndef check_command(command):\n command = command.split()\n if len(command) == 1:\n if command[0] == \"help\":\n return True\n elif command[0] == \"exit\":\n return True\n elif len(command) == 2:\n if command[0] == \"login\":\n login(command)\n return False\n elif command[0] == \"place\":\n place(command)\n return False\n print('Unsupported 
command.')\n return False\n\n\ndef main(argv):\n if (len(argv) != 3):\n print(usage)\n return 1\n\n host = argv[1]\n port = int(argv[2])\n global clientSocket\n clientSocket = socket(AF_INET, SOCK_STREAM)\n clientSocket.connect((host, port))\n\n while(1):\n command = input('ttt> ')\n # Check if command can be handled with one message to server\n if (check_command(command)):\n resp_json = communicate(command)\n print(resp_json['content'])\n if (command == 'exit'):\n break\n return 0\n\n\nif __name__ == '__main__':\n main(sys.argv)\n", "sub_path": "single/GameClient.py", "file_name": "GameClient.py", "file_ext": "py", "file_size_in_byte": 2525, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "json.loads", "line_number": 16, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 104, "usage_type": "attribute"}]} {"seq_id": "472061070", "text": "import threading\r\nimport time\r\nimport math\r\nfrom jira import JIRA\r\nfrom everisJIO import everisJIO\r\nfrom fastUtils import fastUtils\r\nfrom getSubtasks import getSubtasks\r\nfrom workloadSQL import workloadSQL\r\nimport logging\r\nfrom logger import logger\r\n\r\noptions = {\r\n 'server': 'https://steps.everis.com/jiraesp2/'\r\n }\r\n\r\nlog = logger.getCustomLogger(\"workloadThreads\")\r\nlog.debug(\" -- Starting workloadThreads --\")\r\n\r\nnumThreads = 48\r\nallrunEsp = 'ALLRUN-1079'\r\n\r\n\r\ncargaInicial = \"project='Servicio RUN' and issuetype not in subTaskIssueTypes()\"\r\nlastDay = \"project='Servicio RUN' and issuetype not in subTaskIssueTypes() and ( updated>-1d or issuefunction in parentsOf('updated > -1d'))\"\r\nbusqAllRUNEsp = \"Project='Servicio RUN' and key='\"+allrunEsp+\"'\"\r\n\r\nprint(\" -- WORKLOADTHREADS START --\")\r\n\r\n\r\n\r\njira = JIRA(options, auth=('mespinof', '!L1berate45')) \r\n\r\ntotalIssues = jira.search_issues(lastDay,startAt=0, maxResults=1).total\r\nbatchPerThread = int(math.ceil(totalIssues/numThreads))\r\n\r\nprint(\"Division: {}\".format(batchPerThread))\r\n\r\n\r\n\r\nallruns = []\r\n\r\ndef getIssues(startPoint, batchResults, thread):\r\n print(\"\\n---- Starting Thread{} ----\\n\".format(thread))\r\n print(\"\\n---- Thread{}:\\n---- startPoint:{}\\n---- batchResults{}:\".format(thread,startPoint,batchResults))\r\n #issues = jira.search_issues(\"project='Servicio RUN' and issuetype not in subTaskIssueTypes()\",startAt=startPoint, maxResults=batchResults)\r\n issues = jira.search_issues(lastDay,startAt=startPoint, maxResults=batchResults, expand='changelog')\r\n for x in issues:\r\n allrun = everisJIO(x)\r\n allrun.worklog = jira.worklogs(allrun.id)\r\n count = 0\r\n for y in x.fields.subtasks:\r\n try:\r\n allrun.subtasks.append(getSubtasks.getSubtask(y.key, jira))\r\n allrun.subtasks[count].worklog = jira.worklogs(y.key)\r\n except AttributeError:\r\n print(\"There was a problem retrieving the worklog for task: {}\".format(y.key))\r\n count+=1\r\n allruns.append(allrun)\r\n\r\nthreads_list = []\r\nstartPoint = 0\r\nstartTime = time.time()\r\n\r\nprint(\"\\n\\n ---------- STARTING EXTRACTION ---------- \\n\\n\")\r\nfor i in range(numThreads):\r\n t = threading.Thread(target = getIssues,\r\n name = 'thread{}'.format(i),\r\n args = (startPoint,batchPerThread,i))\r\n threads_list.append(t)\r\n t.start()\r\n startPoint += batchPerThread\r\n\r\nfor t in threads_list:\r\n t.join()\r\n\r\nendTime = time.time()\r\nprint(\"\\n\\n Allruns extracted: {} -- Elapsed time: {} 
\\n\\n\".format(len(allruns),(endTime-startTime)))\r\nprint(\"\\n\\n ---------- EXTRACTION FINISHED ---------- \\n\\n\")\r\n\r\nprint(\"\\n\\n ---------- INSERTING INTO DB -----------\\n\\n\")\r\nworkloadSQL.insertRegisters(allruns)\r\nendTime = time.time()\r\nprint(\"\\n\\n Allruns saved to DB: {} -- Elapsed time: {} \\n\\n\".format(len(allruns),(endTime-startTime)))\r\nprint(\"\\n\\n ---------- SAVED TO DB -----------\\n\\n\")\r\n\r\n", "sub_path": "workloadThreads.py", "file_name": "workloadThreads.py", "file_ext": "py", "file_size_in_byte": 2930, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logger.logger.getCustomLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 16, "usage_type": "name"}, {"api_name": "jira.JIRA", "line_number": 31, "usage_type": "call"}, {"api_name": "jira.search_issues", "line_number": 33, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 34, "usage_type": "call"}, {"api_name": "jira.search_issues", "line_number": 46, "usage_type": "call"}, {"api_name": "everisJIO.everisJIO", "line_number": 48, "usage_type": "call"}, {"api_name": "jira.worklogs", "line_number": 49, "usage_type": "call"}, {"api_name": "getSubtasks.getSubtasks.getSubtask", "line_number": 53, "usage_type": "call"}, {"api_name": "getSubtasks.getSubtasks", "line_number": 53, "usage_type": "name"}, {"api_name": "jira.worklogs", "line_number": 54, "usage_type": "call"}, {"api_name": "time.time", "line_number": 62, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 66, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}, {"api_name": "workloadSQL.workloadSQL.insertRegisters", "line_number": 81, "usage_type": "call"}, {"api_name": "workloadSQL.workloadSQL", "line_number": 81, "usage_type": "name"}, {"api_name": "time.time", "line_number": 82, "usage_type": "call"}]} {"seq_id": "444601731", "text": "# coding:utf-8\n\nfrom bottle import route, post, request, redirect, jinja2_template as template\nimport app.models.Scheduler\nmodel = app.models.Scheduler.Scheduler()\n\n\n@route('/')\ndef index():\n result = model.load()\n return template('index', result = result)\n\n\n@route('/new')\ndef new():\n return template('new')\n\n\n@route('/edit/<id:int>')\ndef edit(id):\n return template('edit', i=model.edit(id))\n\n\n@post('/done')\ndef done():\n post_data = {}\n post_data['name'] = request.forms.get('name')\n post_data['des'] = request.forms.get('des')\n post_data['id'] = request.forms.get('id')\n post_data['del'] = request.forms.get('del')\n model.done(post_data)\n\n redirect('/')\n", "sub_path": "root/app/controllers/Scheduler.py", "file_name": "Scheduler.py", "file_ext": "py", "file_size_in_byte": 690, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "app.models.Scheduler.models.Scheduler.Scheduler", "line_number": 5, "usage_type": "call"}, {"api_name": "app.models.Scheduler.models", "line_number": 5, "usage_type": "attribute"}, {"api_name": "app.models.Scheduler", "line_number": 5, "usage_type": "name"}, {"api_name": "bottle.jinja2_template", "line_number": 11, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 8, "usage_type": "call"}, {"api_name": "bottle.jinja2_template", "line_number": 16, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 14, "usage_type": "call"}, {"api_name": "bottle.jinja2_template", 
"line_number": 21, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 19, "usage_type": "call"}, {"api_name": "bottle.request.forms.get", "line_number": 27, "usage_type": "call"}, {"api_name": "bottle.request.forms", "line_number": 27, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 27, "usage_type": "name"}, {"api_name": "bottle.request.forms.get", "line_number": 28, "usage_type": "call"}, {"api_name": "bottle.request.forms", "line_number": 28, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 28, "usage_type": "name"}, {"api_name": "bottle.request.forms.get", "line_number": 29, "usage_type": "call"}, {"api_name": "bottle.request.forms", "line_number": 29, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 29, "usage_type": "name"}, {"api_name": "bottle.request.forms.get", "line_number": 30, "usage_type": "call"}, {"api_name": "bottle.request.forms", "line_number": 30, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 30, "usage_type": "name"}, {"api_name": "bottle.redirect", "line_number": 33, "usage_type": "call"}, {"api_name": "bottle.post", "line_number": 24, "usage_type": "call"}]} {"seq_id": "470913208", "text": "\"\"\"add original title\n\nRevision ID: daf0305d8ed1\nRevises: 29afac58afde\nCreate Date: 2020-10-16 11:56:34.109483\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'daf0305d8ed1'\ndown_revision = '29afac58afde'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('videos', sa.Column('original_name', sa.Text(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('videos', 'original_name')\n # ### end Alembic commands ###\n", "sub_path": "migrations/versions/daf0305d8ed1_add_original_title.py", "file_name": "daf0305d8ed1_add_original_title.py", "file_ext": "py", "file_size_in_byte": 668, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}]} {"seq_id": "143905934", "text": "import webapp2\nfrom google.appengine.api import users\nimport jinja2\nimport os\nimport string\n\n\njinja_environment = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))\n\nclass Error(webapp2.RequestHandler):\n def get(self):\n \n user = users.get_current_user() \n if user:\n \n #set stylesheets needed per page \n specific_urls = \"\"\"\n \n \"\"\"\n \n \n #add the page query to the html\n url = self.request.url\n url = string.split(url, '/')\n Error_template_values = {\n 'page': url[len(url) - 1]\n }\n \n template = jinja_environment.get_template('Page_Content/error.html')\n Error_template = template.render(Error_template_values)\n \n url = users.create_logout_url(self.request.uri)\n nav = \"\"\"\n \n \"\"\" % url\n \n \n template_values = {\n 'specific_urls':specific_urls,\n 'nav': nav,\n 'content': Error_template\n }\n \n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render(template_values))\n else:\n self.redirect('/')\n\napp = webapp2.WSGIApplication([('/.*', Error)], debug=True)\n", "sub_path": "src/Error.py", "file_name": "Error.py", "file_ext": "py", "file_size_in_byte": 1824, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "jinja2.Environment", "line_number": 8, "usage_type": "call"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "webapp2.RequestHandler", "line_number": 11, "usage_type": "attribute"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 14, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 14, "usage_type": "name"}, {"api_name": "string.split", "line_number": 25, "usage_type": "call"}, {"api_name": "google.appengine.api.users.create_logout_url", "line_number": 33, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 33, "usage_type": "name"}, {"api_name": "webapp2.WSGIApplication", "line_number": 57, "usage_type": "call"}]} {"seq_id": "299230318", "text": "from django.core.exceptions import MultipleObjectsReturned\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.views.generic import ListView, DetailView\nfrom django.http import JsonResponse\n\n\nfrom .forms import PrintersListForm, PrinterDetailForm\nfrom .models import CustomWebPage, Printer, PrintOperation, VoteUnit\n\n\ndef vote(request, printer_id):\n printer = get_object_or_404(Printer, 
pk=printer_id)\n\n plus = request.POST['choice']\n vote = VoteUnit()\n vote.printer_id = printer.id\n vote.rating = 1\n vote.save()\n\n\n # selected_choice.save()\n # Always return an HttpResponseRedirect after successfully dealing\n # with POST data. This prevents data from being posted twice if a\n # user hits the Back button.\n return redirect('print:printer_detail', printer.id)\n\ndef Complaint (request, pk, problem):\n printer = Printer.objects.get(id=pk)\n\n if (request.user not in printer.users.all()):\n new = 1\n if (problem == '1'):\n printer.paper_jam = True\n printer.users.add(request.user)\n printer.save()\n elif (problem == '2'):\n printer.no_paper = True\n printer.users.add(request.user)\n printer.save()\n elif (problem == '3'):\n printer.no_paint = True\n printer.users.add(request.user)\n printer.save()\n elif (problem == '4'):\n printer.problem = True\n printer.users.add(request.user)\n printer.save()\n else:\n pass\n else:\n new = 0\n if (problem == '1'):\n printer.paper_jam = False\n printer.users.remove(request.user)\n printer.save()\n elif (problem == '2'):\n printer.no_paper = False\n printer.users.remove(request.user)\n printer.save()\n elif (problem == '3'):\n printer.no_paint = False\n printer.users.remove(request.user)\n printer.save()\n elif (problem == '4'):\n printer.problem = False\n printer.users.remove(request.user)\n printer.save()\n else:\n pass\n\n return JsonResponse({'new': new})\n\n\ndef custom_page(request, desirable_page_id):\n try:\n selected_page = get_object_or_404(CustomWebPage, page_id=desirable_page_id)\n except (KeyError, MultipleObjectsReturned):\n selected_page = CustomWebPage.objects.filter(page_id=desirable_page_id)[0]\n return render(request, 'print/custom_page.html', {'selected_page': selected_page})\n\n\nclass PrintersList(ListView):\n template_name = \"print/printers_list.html\"\n model = Printer\n\n def dispatch(self, request, *args, **kwargs):\n self.form = PrintersListForm(request.GET)\n self.form.is_valid()\n return super(PrintersList, self).dispatch(request, *args, **kwargs)\n\n def get_queryset(self):\n queryset = Printer.objects.all()\n if self.form.cleaned_data.get('search'):\n queryset = queryset.filter(id=self.form.cleaned_data['search'])\n if self.form.cleaned_data.get('active_only') is not None:\n is_active = self.form.cleaned_data['active_only']\n if is_active == 'true':\n queryset = queryset.filter(is_active=True)\n elif is_active == 'false':\n queryset = queryset.filter(is_active=False)\n if self.form.cleaned_data.get('sort_field'):\n queryset = queryset.order_by(self.form.cleaned_data['sort_field'])\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super(PrintersList, self).get_context_data(**kwargs)\n context['form'] = self.form\n return context\n\n\nclass PrinterDetail(DetailView):\n template_name = 'print/printer_detail.html'\n model = Printer\n\n def dispatch(self, request, pk=None, *args, **kwargs):\n self.current_printer_id = pk\n self.form = PrinterDetailForm(request.POST)\n return super(PrinterDetail, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(PrinterDetail, self).get_context_data(**kwargs)\n context['form'] = self.form\n return context\n\n def post(self, request, *args, **kwargs):\n if self.form.is_valid():\n printer = Printer.objects.get(id=self.current_printer_id)\n if self.form.cleaned_data['enable_choice'] == 'true':\n printer.is_active = True\n elif self.form.cleaned_data['enable_choice'] == 'false':\n 
printer.is_active = False\n\n printer.save()\n return redirect("print:printer_detail", pk=self.current_printer_id)\n # raise forms.ValidationError('Choose an action!')\n #return redirect("print:printer_detail", pk=self.current_printer_id)\n\nclass OperationsListAjax(ListView):\n template_name = "print/operations_ajax.html"\n model = PrintOperation\n\nclass PrintersListAjax(ListView):\n template_name = "print/printers_list_ajax.html"\n model = Printer\n\nclass PrinterStatusAjax(ListView):\n template_name = "print/printer_detail_ajax.html"\n model = Printer\n\nclass OperationDetail(DetailView):\n template_name = 'print/operation_detail.html'\n model = PrintOperation\n\n\nclass PrinterVote(DetailView):\n template_name = 'print/operation_detail.html'\n model = VoteUnit\n", "sub_path": "src/print/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5467, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.shortcuts.get_object_or_404", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Printer", "line_number": 13, "usage_type": "argument"}, {"api_name": "models.VoteUnit", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Printer.objects.get", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Printer.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Printer", "line_number": 29, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 72, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 77, "usage_type": "call"}, {"api_name": "models.CustomWebPage", "line_number": 77, "usage_type": "argument"}, {"api_name": "django.core.exceptions.MultipleObjectsReturned", "line_number": 78, "usage_type": "name"}, {"api_name": "models.CustomWebPage.objects.filter", "line_number": 79, "usage_type": "call"}, {"api_name": "models.CustomWebPage.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "models.CustomWebPage", "line_number": 79, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 80, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 83, "usage_type": "name"}, {"api_name": "models.Printer", "line_number": 85, "usage_type": "name"}, {"api_name": "forms.PrintersListForm", "line_number": 88, "usage_type": "call"}, {"api_name": "models.Printer.objects.all", "line_number": 93, "usage_type": "call"}, {"api_name": "models.Printer.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "models.Printer", "line_number": 93, "usage_type": "name"}, {"api_name": "django.views.generic.DetailView", "line_number": 112, "usage_type": "name"}, {"api_name": "models.Printer", "line_number": 114, "usage_type": "name"}, {"api_name": "forms.PrinterDetailForm", "line_number": 118, "usage_type": "call"}, {"api_name": "models.Printer.objects.get", "line_number": 128, "usage_type": "call"}, {"api_name": "models.Printer.objects", "line_number": 128, "usage_type": "attribute"}, {"api_name": "models.Printer", "line_number": 128, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 135, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 139, "usage_type": "name"}, {"api_name": "models.PrintOperation", "line_number": 141, "usage_type": "name"}, 
{"api_name": "django.views.generic.ListView", "line_number": 143, "usage_type": "name"}, {"api_name": "models.Printer", "line_number": 145, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 147, "usage_type": "name"}, {"api_name": "models.Printer", "line_number": 149, "usage_type": "name"}, {"api_name": "django.views.generic.DetailView", "line_number": 151, "usage_type": "name"}, {"api_name": "models.PrintOperation", "line_number": 153, "usage_type": "name"}, {"api_name": "django.views.generic.DetailView", "line_number": 156, "usage_type": "name"}, {"api_name": "models.VoteUnit", "line_number": 158, "usage_type": "name"}]} {"seq_id": "1722009", "text": "import numpy as np\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\nfrom tqdm import tqdm\r\nfrom dataset.ModelNet40 import RegistrationData,ModelNet40\r\n#from model.Pointnet import PointNet\r\nfrom model.iPCRNet import iPCRNet\r\nfrom model.MeshNet import MeshNet\r\nfrom losses.chamfer_distance import ChamferDistanceLoss\r\nimport transforms3d\r\nfrom losses.rmse_features import RMSEFeaturesLoss\r\nfrom losses.frobenius_norm import FrobeniusNormLoss\r\nfrom operations.transform_functions import PCRNetTransform\r\n\r\nBATCH_SIZE=20\r\nEVAL=False\r\nSTART_EPOCH=0\r\nMAX_EPOCHS=200\r\npretrained='checkpoints/ipcrnet/models2/best_model.t7' #ไฝฟ็”จๆœ€ๅฅฝ็š„ๆจกๅž‹ๅ‚ๆ•ฐๆต‹่ฏ•\r\n\r\n\r\n# Find error metrics.\r\ndef find_errors(igt_R, pred_R, igt_t, pred_t):\r\n # igt_R:\t\t\t\tRotation matrix [3, 3] (source = igt_R * template)\r\n # pred_R: \t\t\tRegistration algorithm's rotation matrix [3, 3] (template = pred_R * source)\r\n # igt_t:\t\t\t\ttranslation vector [1, 3] (source = template + igt_t)\r\n # pred_t: \t\t\tRegistration algorithm's translation matrix [1, 3] (template = source + pred_t)\r\n\r\n # Euler distance between ground truth translation and predicted translation.\r\n igt_t = -np.matmul(igt_R.T, igt_t.T).T\t\t\t# gt translation vector (source -> template)\r\n translation_error = np.sqrt(np.sum(np.square(igt_t - pred_t)))\r\n\r\n # Convert matrix remains to axis angle representation and report the angle as rotation error.\r\n error_mat = np.dot(igt_R, pred_R)\t\t\t\t\t\t\t# matrix remains [3, 3]\r\n _, angle = transforms3d.axangles.mat2axangle(error_mat)\r\n return translation_error, abs(angle*(180/np.pi))\r\n\r\ndef compute_accuracy(igt_R, pred_R, igt_t, pred_t):\r\n errors_temp = []\r\n for igt_R_i, pred_R_i, igt_t_i, pred_t_i in zip(igt_R, pred_R, igt_t, pred_t):\r\n errors_temp.append(find_errors(igt_R_i, pred_R_i, igt_t_i, pred_t_i))\r\n return np.mean(errors_temp, axis=0)\r\n\r\ndef test_one_epoch(device, model, test_loader):\r\n model.eval()\r\n test_loss = 0.0\r\n count = 0\r\n errors = []\r\n\r\n for i, data in enumerate(tqdm(test_loader)):\r\n template, source, igt, igt_R, igt_t = data\r\n\r\n for index in range(5):\r\n template[index] = template[index].to(device) # [B,N,3]\r\n source[index] = source[index].to(device) # [B,N,3]\r\n\r\n # template = template.to(device)\r\n # source = source.to(device)\r\n igt = igt.to(device)\r\n\r\n # source_original = source.clone()\r\n # template_original = template.clone()\r\n igt_t = igt_t - torch.mean(source[4], dim=1).unsqueeze(1)\r\n # source[4] = source[4] - torch.mean(source[4], dim=1, keepdim=True)\r\n # template[4] = template[4] - torch.mean(template[4], dim=1, keepdim=True)\r\n for index in range(3):\r\n source[index] = source[index] - torch.mean(source[index], dim=2, keepdim=True)\r\n template[index] = template[index] - 
torch.mean(template[index], dim=2, keepdim=True)\r\n source[4] = source[4] - torch.mean(source[4], dim=1, keepdim=True)\r\n template[4] = template[4] - torch.mean(template[4], dim=1, keepdim=True)\r\n\r\n output = model(device, template, source)\r\n est_R = output['est_R']\r\n est_t = output['est_t']\r\n est_T = output['est_T']\r\n\r\n errors.append(compute_accuracy(igt_R.detach().cpu().numpy(), est_R.detach().cpu().numpy(),\r\n igt_t.detach().cpu().numpy(), est_t.detach().cpu().numpy()))\r\n\r\n # transformed_source = torch.bmm(est_R, source.permute(0, 2, 1)).permute(0,2,1) + est_t\r\n loss_val = ChamferDistanceLoss()(template[4], output['transformed_source'])\r\n\r\n test_loss += loss_val.item()\r\n count += 1\r\n\r\n test_loss = float(test_loss)/count\r\n errors = np.mean(np.array(errors), axis=0)\r\n return test_loss, errors[0], errors[1]\r\n\r\nif __name__ == '__main__':\r\n testset = RegistrationData('PCRNet', ModelNet40(part='test'),is_testing=True)\r\n testloader = DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False, drop_last=False, num_workers=4)\r\n device = torch.device('cpu')\r\n\r\n #ptnet = PointNet(emb_dims=1024)\r\n model = iPCRNet(feature_model=MeshNet)\r\n model = model.to(device)\r\n\r\n if pretrained:\r\n model.load_state_dict(torch.load(pretrained, map_location='cpu'))\r\n\r\n model.to(device)\r\n test_loss, translation_error, rotation_error = test_one_epoch(device, model, testloader)\r\n print(\"Test Loss: {}, Rotation Error: {} & Translation Error: {}\".format(test_loss, rotation_error,translation_error))\r\n", "sub_path": "Jinsheng Liu/PCR-MESH/test_PCRNet.py", "file_name": "test_PCRNet.py", "file_ext": "py", "file_size_in_byte": 4530, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "numpy.matmul", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 34, "usage_type": "call"}, {"api_name": "transforms3d.axangles.mat2axangle", "line_number": 35, "usage_type": "call"}, {"api_name": "transforms3d.axangles", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 42, "usage_type": "call"}, {"api_name": "model.iPCRNet.eval", "line_number": 45, "usage_type": "call"}, {"api_name": "model.iPCRNet", "line_number": 45, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 70, "usage_type": "call"}, {"api_name": "model.iPCRNet", "line_number": 72, "usage_type": "call"}, {"api_name": "losses.chamfer_distance.ChamferDistanceLoss", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "dataset.ModelNet40.RegistrationData", "line_number": 91, "usage_type": "call"}, {"api_name": "dataset.ModelNet40.ModelNet40", "line_number": 91, "usage_type": "call"}, {"api_name": 
"torch.utils.data.DataLoader", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 93, "usage_type": "call"}, {"api_name": "model.iPCRNet", "line_number": 96, "usage_type": "name"}, {"api_name": "model.iPCRNet.iPCRNet", "line_number": 96, "usage_type": "call"}, {"api_name": "model.MeshNet.MeshNet", "line_number": 96, "usage_type": "name"}, {"api_name": "model.iPCRNet", "line_number": 97, "usage_type": "name"}, {"api_name": "model.iPCRNet.to", "line_number": 97, "usage_type": "call"}, {"api_name": "model.iPCRNet.load_state_dict", "line_number": 100, "usage_type": "call"}, {"api_name": "model.iPCRNet", "line_number": 100, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 100, "usage_type": "call"}, {"api_name": "model.iPCRNet.to", "line_number": 102, "usage_type": "call"}, {"api_name": "model.iPCRNet", "line_number": 102, "usage_type": "name"}, {"api_name": "model.iPCRNet", "line_number": 103, "usage_type": "argument"}]} {"seq_id": "171277742", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"main module.\n\n@route '/'\n@route 'web_hook'\n\"\"\"\n\nimport os\nimport requests\nimport json\n\nfrom flask import Flask\nfrom flask import request\n\nfrom doco.client import Client\n\nfrom models import db\nfrom models import User\nfrom models import Flag\nfrom models import Reply\nfrom modules.bot import Bot\nfrom modules.message import Quiz\nfrom modules.utils import GeneralUtility\nfrom modules.utils import Config\nfrom modules.utils import Header\nfrom modules.webhook import ActionEvents\nfrom modules.webhook import Events\nfrom modules.yahoo_api import KeyPhrase\n\napp = Flask(__name__)\n\n# set database\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']\n\ndb.init_app(app)\n\nconfig = Config('./setting.conf')\nACCESS_TOKEN = config.get_access_token()\nENDPOINT_URL = config.get_endpoint_url()\nAPP_ID = config.get_appid()\n\nPROXY = {\n \"https\": os.environ.get('FIXIE_URL', ''),\n \"http\": os.environ.get('FIXIE_URL', '')\n}\n\n\n@app.before_first_request\ndef create_database():\n db.create_all()\n\n if not db.session.query(Flag).filter(Flag.key == \"reply_flag\").count():\n reg = Flag(\"reply_flag\")\n reg.set_value(0)\n db.session.add(reg)\n db.session.commit()\n\n\n@app.route('/')\ndef index_route():\n return 'Running!'\n\n\n@app.route('/web_hook', methods=['POST'])\ndef webhook_route():\n bot = Bot()\n\n # create header\n header = Header(ACCESS_TOKEN)\n content_header = header.content_header()\n\n # request event\n event = request.json['events'][0]\n\n if 'postback' in event:\n try:\n events = ActionEvents(event)\n reply_token = events.replyToken\n except:\n raise\n\n if events.postback.data == \"o\":\n content = bot.text_message_content(reply_token, \"ใ›ใ„ใ‹ใ„ใ ใ‚ˆ๏ผ\")\n else:\n content = bot.text_message_content(reply_token, \"ใ„ใ„ใˆ\")\n\n else:\n try:\n events = Events(event)\n reply_token = events.replyToken\n except:\n raise\n\n userid = events.source.userId\n if not db.session.query(User).filter(User.userid == userid).count():\n reg = User(events.source.userId)\n db.session.add(reg)\n db.session.commit()\n\n # extract user\n user = User.query.filter(User.userid == userid).first()\n # extract reply flag\n reply_flag = Flag.query.filter(Flag.key == \"reply_flag\").first()\n\n reg = Reply(events.source.userId, events.message.text)\n\n try:\n keyphrase = KeyPhrase(APP_ID, events.message.text)\n keywords = keyphrase.keywords\n except:\n keywords = {}\n\n # search 
image\n utility = GeneralUtility()\n if len(keywords) != 0 and utility.get_match_key(keywords, \"็”ปๅƒ\"):\n image_url = bot.get_image(' '.join(keywords.keys()))\n content = bot.image_message_content(reply_token, image_url)\n elif events.message.text == \"ใชใžใชใž\":\n quiz = Quiz(\"data/quiz.csv\")\n quiz_text, actions = quiz.create_quiz()\n content = bot.buttons_message_content(\n reply_token,\n quiz_text,\n actions\n )\n elif reply_flag.value == 1:\n reply_flag.set_value(0)\n user.set_username(events.message.text)\n\n db.session.add(reply_flag)\n db.session.add(user)\n db.session.commit()\n\n content = bot.text_message_content(\n reply_token,\n \"ใ‚ใ‹ใฃใŸ๏ผใ‚ˆใ‚ใ—ใใญใ€%s!\" % user.username\n )\n elif user.username is None:\n reply_flag.set_value(1)\n\n db.session.add(reply_flag)\n db.session.commit()\n\n content = bot.text_message_content(\n reply_token,\n \"ใ‚ใชใŸใฎใŠๅๅ‰ใฏใชใ‚ใซ๏ผŸ\"\n )\n else:\n docomo_client = Client(apikey=config.get_docomo_apikey())\n content = bot.text_message_content(\n reply_token,\n docomo_client.send(\n utt=events.message.text,\n apiname='Dialogue',\n **{'nickname': user.username}\n )['utt']\n )\n\n request_body = content.get_request_body()\n\n # post request\n res = requests.post(\n ENDPOINT_URL,\n data=json.dumps(request_body),\n headers=content_header,\n proxies=PROXY\n )\n print(res)\n\n return 'Running'\n\n\nif __name__ == '__main__':\n app.run()\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 4622, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "flask.Flask", "line_number": 31, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.db.init_app", "line_number": 37, "usage_type": "call"}, {"api_name": "models.db", "line_number": 37, "usage_type": "name"}, {"api_name": "modules.utils.Config", "line_number": 39, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 45, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 46, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.db.create_all", "line_number": 52, "usage_type": "call"}, {"api_name": "models.db", "line_number": 52, "usage_type": "name"}, {"api_name": "models.db.session.query", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Flag", "line_number": 54, "usage_type": "argument"}, {"api_name": "models.db.session", "line_number": 54, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 54, "usage_type": "name"}, {"api_name": "models.Flag.key", "line_number": 54, "usage_type": "attribute"}, {"api_name": "models.Flag", "line_number": 55, "usage_type": "call"}, {"api_name": "models.db.session.add", "line_number": 57, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 57, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 57, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 58, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 58, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 58, "usage_type": "name"}, {"api_name": "modules.bot.Bot", "line_number": 68, "usage_type": "call"}, {"api_name": "modules.utils.Header", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.request.json", 
"line_number": 75, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 75, "usage_type": "name"}, {"api_name": "modules.webhook.ActionEvents", "line_number": 79, "usage_type": "call"}, {"api_name": "modules.webhook.Events", "line_number": 91, "usage_type": "call"}, {"api_name": "models.db.session.query", "line_number": 97, "usage_type": "call"}, {"api_name": "models.User", "line_number": 97, "usage_type": "argument"}, {"api_name": "models.db.session", "line_number": 97, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 97, "usage_type": "name"}, {"api_name": "models.User.userid", "line_number": 97, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 98, "usage_type": "call"}, {"api_name": "models.db.session.add", "line_number": 99, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 99, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 99, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 100, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 100, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 100, "usage_type": "name"}, {"api_name": "models.User.query.filter", "line_number": 103, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 103, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 103, "usage_type": "name"}, {"api_name": "models.User.userid", "line_number": 103, "usage_type": "attribute"}, {"api_name": "models.Flag.query.filter", "line_number": 105, "usage_type": "call"}, {"api_name": "models.Flag.query", "line_number": 105, "usage_type": "attribute"}, {"api_name": "models.Flag", "line_number": 105, "usage_type": "name"}, {"api_name": "models.Flag.key", "line_number": 105, "usage_type": "attribute"}, {"api_name": "models.Reply", "line_number": 107, "usage_type": "call"}, {"api_name": "modules.yahoo_api.KeyPhrase", "line_number": 110, "usage_type": "call"}, {"api_name": "modules.utils.GeneralUtility", "line_number": 116, "usage_type": "call"}, {"api_name": "modules.message.Quiz", "line_number": 121, "usage_type": "call"}, {"api_name": "models.db.session.add", "line_number": 132, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 132, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 132, "usage_type": "name"}, {"api_name": "models.db.session.add", "line_number": 133, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 133, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 133, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 134, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 134, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 134, "usage_type": "name"}, {"api_name": "models.db.session.add", "line_number": 143, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 143, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 143, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 144, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 144, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 144, "usage_type": "name"}, {"api_name": "doco.client.Client", "line_number": 151, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 164, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 166, "usage_type": 
"call"}]} {"seq_id": "475244849", "text": "from pathlib import Path\n\nimport pytest\n\nfrom ogc import run\nfrom ogc.enums import SpecCore\nfrom ogc.exceptions import SpecProcessException\nfrom ogc.spec import SpecJobMatrix, SpecJobPlan, SpecLoader, SpecPlugin\nfrom ogc.state import app\n\nfixtures_dir = Path(__file__).parent / \"fixtures\"\n\n\n@pytest.fixture(scope=\"module\")\ndef runners():\n \"\"\" Fixture with the parsed runners\n \"\"\"\n spec = SpecLoader.load([fixtures_dir / \"spec.yml\"])\n return [job for job in spec[\"plan\"]]\n\n\ndef test_matrix_combos(mocker):\n \"\"\" Test all combos are present\n \"\"\"\n mocker.patch(\"ogc.state.app.log\")\n spec = SpecLoader.load([fixtures_dir / \"spec-matrix.yml\"])\n matrixes = SpecJobMatrix(spec[SpecCore.MATRIX])\n combos = matrixes.generate()\n assert {\n \"snap_version\": \"1.18/edge\",\n \"series\": \"focal\",\n \"channel\": \"stable\",\n \"arch\": \"arm64\",\n } in combos\n assert {\n \"snap_version\": \"1.17/stable\",\n \"series\": \"bionic\",\n \"channel\": \"stable\",\n \"arch\": \"arm64\",\n } in combos\n assert {\n \"snap_version\": \"1.15/edge\",\n \"series\": \"xenial\",\n \"channel\": \"edge\",\n \"arch\": \"amd64\",\n } in combos\n\n\ndef test_matrix_replace_env_var(mocker):\n \"\"\" Tests that a matrix variable updates an referenced environment variable\n \"\"\"\n mocker.patch(\"ogc.state.app.log\")\n spec = SpecLoader.load([fixtures_dir / \"spec-matrix.yml\"])\n matrixes = SpecJobMatrix(spec[SpecCore.MATRIX])\n jobs = [SpecJobPlan(spec[SpecCore.PLAN], matrix) for matrix in matrixes.generate()]\n jobs[0].env()\n assert app.env[\"JUJU_CONTROLLER\"] == \"validate-ck-{}\".format(\n jobs[0].matrix[\"series\"]\n )\n assert app.env[\"JUJU_DEPLOY_CHANNEL\"] == jobs[0].matrix[\"channel\"]\n\n\ndef test_yml_include_spec(mocker):\n mocker.patch(\"ogc.state.app.log\")\n spec = SpecLoader.load([fixtures_dir / \"spec-base.yml\"])\n assert \"juju\" in spec[\"plan\"][0][\"before-script\"][0]\n\n\ndef test_nested_assets(mocker):\n mocker.patch(\"ogc.state.app.log\")\n spec = SpecLoader.load([fixtures_dir / \"spec.yml\"])\n plug = SpecPlugin(spec[SpecCore.PLAN][0][\"script\"][3][\"runner\"])\n assets = plug.opt(\"assets\")\n assert assets[0][\"name\"] == \"pytest configuration\"\n\n\ndef test_cmd_to_env(mocker):\n mocker.patch(\"ogc.state.app.log\")\n spec = SpecLoader.load([fixtures_dir / \"spec.yml\"])\n _env = {}\n _env[\"BONZAI\"] = \"bonzai-test\"\n _env[\"ANOTHERTIME\"] = \"this happened late\"\n _env[\"VAR_NICE\"] = \"interesting concept\"\n _env[\"CONTROLLER\"] = \"juju-controller\"\n _env[\"MODEL\"] = \"juju-model\"\n app.env = _env\n spec = SpecPlugin(spec[SpecCore.PLAN][0][\"script\"][0][\"runner\"])\n cmd = spec.opt(\"cmd\")\n assert cmd == (\n \"echo bonzai-test lthis happened \"\n \"late envinteresting concept \"\n \"juju-controller:juju-model \"\n \"juju-controller juju-model\"\n )\n\n\ndef test_get_option_env_key(mocker):\n \"\"\" Tests that an environment variable set for a given option is converted\n into the hosts environment setting \"\"\"\n mocker.patch(\"ogc.state.app.log\")\n spec = SpecLoader.load([fixtures_dir / \"spec.yml\"])\n plug = SpecPlugin(spec[SpecCore.PLAN][0][\"before-script\"][0][\"juju\"])\n\n _env = {}\n _env[\"JUJU_CLOUD\"] = \"aws/us-east-1\"\n _env[\"JUJU_CONTROLLER\"] = \"test-controller\"\n _env[\"JUJU_MODEL\"] = \"test-model\"\n app.env = _env\n assert plug.opt(\"cloud\") == \"aws/us-east-1\"\n assert plug.opt(\"controller\") == \"test-controller\"\n assert plug.opt(\"model\") == 
\"test-model\"\n\n\ndef test_get_option_env_key_bool(mocker):\n \"\"\" Tests that get_plugin_option handles boolean values correctly\n \"\"\"\n mocker.patch(\"ogc.state.app.log\")\n spec = SpecLoader.load([fixtures_dir / \"spec.yml\"])\n plug = SpecPlugin(spec[SpecCore.PLAN][0][\"before-script\"][0][\"juju\"])\n\n _env = {}\n _env[\"JUJU_CLOUD\"] = \"aws/us-east-1\"\n _env[\"JUJU_CONTROLLER\"] = \"test-controller\"\n _env[\"JUJU_MODEL\"] = \"test-model\"\n app.env = _env\n assert plug.opt(\"deploy.reuse\") is True\n\n\ndef test_run_script_passes_check(mocker):\n \"\"\" Tests that we can run shell commands\n \"\"\"\n mocker.patch(\"ogc.state.app.log\")\n run.script(\"ls -l\", env=app.env.copy())\n\n\ndef test_run_script_blob_passes_check(mocker):\n \"\"\" Tests that we can run shell scripts\n \"\"\"\n mocker.patch(\"ogc.state.app.log\")\n blob = \"\"\"\n#!/bin/bash\nset -x\nls -l\n\"\"\"\n run.script(blob, env=app.env.copy())\n\n\ndef test_run_script_fails_check(mocker):\n \"\"\" Tests that we can run shell scripts\n \"\"\"\n mocker.patch(\"ogc.state.app.log\")\n with pytest.raises(SpecProcessException):\n run.script(\"ls -l\\necho HI\\nexit 1\", env=app.env.copy())\n\n\n@pytest.mark.skip\ndef test_condition_if(mocker):\n \"\"\" Tests that a condition will skip a job item\n \"\"\"\n mocker.patch(\"ogc.state.app.log\")\n spec = SpecLoader.load([fixtures_dir / \"spec-condition.yml\"])\n matrixes = [SpecJobMatrix(matrix) for matrix in app.spec[SpecCore.MATRIX]]\n\n jobs = [SpecJobPlan(app.spec[SpecCore.PLAN], matrix) for matrix in matrixes]\n assert jobs[0].condition_if()\n assert jobs[1].condition_if()\n assert jobs[2].condition_if() is False\n", "sub_path": "tests/test_ogc.py", "file_name": "test_ogc.py", "file_ext": "py", "file_size_in_byte": 5139, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pathlib.Path", "line_number": 11, "usage_type": "call"}, {"api_name": "ogc.spec.SpecLoader.load", "line_number": 18, "usage_type": "call"}, {"api_name": "ogc.spec.SpecLoader", "line_number": 18, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 14, "usage_type": "call"}, {"api_name": "ogc.spec.SpecLoader.load", "line_number": 26, "usage_type": "call"}, {"api_name": "ogc.spec.SpecLoader", "line_number": 26, "usage_type": "name"}, {"api_name": "ogc.spec.SpecJobMatrix", "line_number": 27, "usage_type": "call"}, {"api_name": "ogc.enums.SpecCore.MATRIX", "line_number": 27, "usage_type": "attribute"}, {"api_name": "ogc.enums.SpecCore", "line_number": 27, "usage_type": "name"}, {"api_name": "ogc.spec.SpecLoader.load", "line_number": 53, "usage_type": "call"}, {"api_name": "ogc.spec.SpecLoader", "line_number": 53, "usage_type": "name"}, {"api_name": "ogc.spec.SpecJobMatrix", "line_number": 54, "usage_type": "call"}, {"api_name": "ogc.enums.SpecCore.MATRIX", "line_number": 54, "usage_type": "attribute"}, {"api_name": "ogc.enums.SpecCore", "line_number": 54, "usage_type": "name"}, {"api_name": "ogc.spec.SpecJobPlan", "line_number": 55, "usage_type": "call"}, {"api_name": "ogc.enums.SpecCore.PLAN", "line_number": 55, "usage_type": "attribute"}, {"api_name": "ogc.enums.SpecCore", "line_number": 55, "usage_type": "name"}, {"api_name": "ogc.state.app.env", "line_number": 57, "usage_type": "attribute"}, {"api_name": "ogc.state.app", "line_number": 57, "usage_type": "name"}, {"api_name": "ogc.state.app.env", "line_number": 60, "usage_type": "attribute"}, {"api_name": "ogc.state.app", "line_number": 60, "usage_type": 
"name"}, {"api_name": "ogc.spec.SpecLoader.load", "line_number": 65, "usage_type": "call"}, {"api_name": "ogc.spec.SpecLoader", "line_number": 65, "usage_type": "name"}, {"api_name": "ogc.spec.SpecLoader.load", "line_number": 71, "usage_type": "call"}, {"api_name": "ogc.spec.SpecLoader", "line_number": 71, "usage_type": "name"}, {"api_name": "ogc.spec.SpecPlugin", "line_number": 72, "usage_type": "call"}, {"api_name": "ogc.enums.SpecCore.PLAN", "line_number": 72, "usage_type": "attribute"}, {"api_name": "ogc.enums.SpecCore", "line_number": 72, "usage_type": "name"}, {"api_name": "ogc.spec.SpecLoader.load", "line_number": 79, "usage_type": "call"}, {"api_name": "ogc.spec.SpecLoader", "line_number": 79, "usage_type": "name"}, {"api_name": "ogc.state.app.env", "line_number": 86, "usage_type": "attribute"}, {"api_name": "ogc.state.app", "line_number": 86, "usage_type": "name"}, {"api_name": "ogc.spec.SpecPlugin", "line_number": 87, "usage_type": "call"}, {"api_name": "ogc.enums.SpecCore.PLAN", "line_number": 87, "usage_type": "attribute"}, {"api_name": "ogc.enums.SpecCore", "line_number": 87, "usage_type": "name"}, {"api_name": "ogc.spec.SpecLoader.load", "line_number": 101, "usage_type": "call"}, {"api_name": "ogc.spec.SpecLoader", "line_number": 101, "usage_type": "name"}, {"api_name": "ogc.spec.SpecPlugin", "line_number": 102, "usage_type": "call"}, {"api_name": "ogc.enums.SpecCore.PLAN", "line_number": 102, "usage_type": "attribute"}, {"api_name": "ogc.enums.SpecCore", "line_number": 102, "usage_type": "name"}, {"api_name": "ogc.state.app.env", "line_number": 108, "usage_type": "attribute"}, {"api_name": "ogc.state.app", "line_number": 108, "usage_type": "name"}, {"api_name": "ogc.spec.SpecLoader.load", "line_number": 118, "usage_type": "call"}, {"api_name": "ogc.spec.SpecLoader", "line_number": 118, "usage_type": "name"}, {"api_name": "ogc.spec.SpecPlugin", "line_number": 119, "usage_type": "call"}, {"api_name": "ogc.enums.SpecCore.PLAN", "line_number": 119, "usage_type": "attribute"}, {"api_name": "ogc.enums.SpecCore", "line_number": 119, "usage_type": "name"}, {"api_name": "ogc.state.app.env", "line_number": 125, "usage_type": "attribute"}, {"api_name": "ogc.state.app", "line_number": 125, "usage_type": "name"}, {"api_name": "ogc.run.script", "line_number": 133, "usage_type": "call"}, {"api_name": "ogc.run", "line_number": 133, "usage_type": "name"}, {"api_name": "ogc.state.app.env.copy", "line_number": 133, "usage_type": "call"}, {"api_name": "ogc.state.app.env", "line_number": 133, "usage_type": "attribute"}, {"api_name": "ogc.state.app", "line_number": 133, "usage_type": "name"}, {"api_name": "ogc.run.script", "line_number": 145, "usage_type": "call"}, {"api_name": "ogc.run", "line_number": 145, "usage_type": "name"}, {"api_name": "ogc.state.app.env.copy", "line_number": 145, "usage_type": "call"}, {"api_name": "ogc.state.app.env", "line_number": 145, "usage_type": "attribute"}, {"api_name": "ogc.state.app", "line_number": 145, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 152, "usage_type": "call"}, {"api_name": "ogc.exceptions.SpecProcessException", "line_number": 152, "usage_type": "argument"}, {"api_name": "ogc.run.script", "line_number": 153, "usage_type": "call"}, {"api_name": "ogc.run", "line_number": 153, "usage_type": "name"}, {"api_name": "ogc.state.app.env.copy", "line_number": 153, "usage_type": "call"}, {"api_name": "ogc.state.app.env", "line_number": 153, "usage_type": "attribute"}, {"api_name": "ogc.state.app", "line_number": 153, "usage_type": 
"name"}, {"api_name": "ogc.spec.SpecLoader.load", "line_number": 161, "usage_type": "call"}, {"api_name": "ogc.spec.SpecLoader", "line_number": 161, "usage_type": "name"}, {"api_name": "ogc.spec.SpecJobMatrix", "line_number": 162, "usage_type": "call"}, {"api_name": "ogc.state.app.spec", "line_number": 162, "usage_type": "attribute"}, {"api_name": "ogc.state.app", "line_number": 162, "usage_type": "name"}, {"api_name": "ogc.enums.SpecCore.MATRIX", "line_number": 162, "usage_type": "attribute"}, {"api_name": "ogc.enums.SpecCore", "line_number": 162, "usage_type": "name"}, {"api_name": "ogc.spec.SpecJobPlan", "line_number": 164, "usage_type": "call"}, {"api_name": "ogc.state.app.spec", "line_number": 164, "usage_type": "attribute"}, {"api_name": "ogc.state.app", "line_number": 164, "usage_type": "name"}, {"api_name": "ogc.enums.SpecCore.PLAN", "line_number": 164, "usage_type": "attribute"}, {"api_name": "ogc.enums.SpecCore", "line_number": 164, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 156, "usage_type": "attribute"}]} {"seq_id": "463459764", "text": "# -*- coding: utf-8 -*-\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import padding\nimport base64\nimport hashlib\n\ndef md5Hex(instr):\n return hashlib.md5(instr.encode(encoding='UTF-8')).hexdigest()\n\ndef md5HexJy(instr, keystr):\n return md5Hex(md5Hex(md5Hex(instr))+keystr)\n\nclass aes(object):\n def __init__(self,key,iv):\n self.key = key\n self.iv = iv\n key_b = bytes(self.key, 'utf-8')\n iv_b = bytes(self.iv, 'utf-8')\n self.cipher = Cipher(algorithms.AES(key_b), modes.CBC(iv_b), backend=default_backend())\n\n def encodeJson(self,line):\n byline = bytes(line, 'utf-8')\n encryptor = self.cipher.encryptor()\n padder = padding.PKCS7(128).padder()\n padded_data = padder.update(byline) + padder.finalize()\n enc_content = encryptor.update(padded_data) + encryptor.finalize()\n stren =base64.b64encode(enc_content)\n return stren.decode()\n\n def decodeJson(self,enStr):\n deStr = bytes(enStr,'utf-8')\n destrendd = base64.b64decode(deStr)\n depadder = padding.PKCS7(128).unpadder()\n decryptor = self.cipher.decryptor()\n padded_data = decryptor.update(destrendd) + decryptor.finalize()\n dec_content = depadder.update(padded_data) + depadder.finalize()\n return dec_content.decode()\n", "sub_path": "ApaySim/hoe/hoeAes.py", "file_name": "hoeAes.py", "file_ext": "py", "file_size_in_byte": 1436, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "hashlib.md5", "line_number": 9, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.Cipher", "line_number": 20, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.AES", "line_number": 20, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 20, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes.CBC", "line_number": 20, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes", "line_number": 20, "usage_type": "name"}, {"api_name": "cryptography.hazmat.backends.default_backend", "line_number": 20, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.padding.PKCS7", "line_number": 25, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.padding", "line_number": 25, "usage_type": "name"}, 
{"api_name": "base64.b64encode", "line_number": 28, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 33, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.padding.PKCS7", "line_number": 34, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.padding", "line_number": 34, "usage_type": "name"}]} {"seq_id": "629901762", "text": "import pyautogui, time #imports pyautogui and time modules\r\nfrom time import sleep #imports sleep from time module\r\n\r\n##########\r\ncount = 0# #counter\r\n##########\r\n\r\nwhile True:\r\n time.sleep(10) #you can change the time if u want (second)\r\n count += 1 #Don't change this\r\n myScreenshot = pyautogui.screenshot() #Takes the screenshot\r\n myScreenshot.save(r'path here\\screenshot{}.png'.format(count)) #saves the screenshot, change \"path here\".\r\n", "sub_path": "ss.py", "file_name": "ss.py", "file_ext": "py", "file_size_in_byte": 454, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "time.sleep", "line_number": 9, "usage_type": "call"}, {"api_name": "pyautogui.screenshot", "line_number": 11, "usage_type": "call"}]} {"seq_id": "273873157", "text": "import discord\nfrom discord.ext import commands\n\nclient = commands.Bot(command_prefix = '.')\n\n@client.event\nasync def on_ready():\n print('Hello World!')\n\n####### METHODS #######\n\n#Watch list\n#Adds users to the specified group for mentioning later\n@client.command(aliases=['bought'])\nasync def watch(ctx):\n role = discord.utils.get(ctx.guild.roles, name=\"Stalnks\")\n user = ctx.message.author\n userMessage = ctx.message\n botMessage = user.mention + ', here\\'s hoping for a nice surprise!'\n await user.add_roles(role)\n botMessageEvent = await ctx.send(botMessage)\n await userMessage.delete(delay=1)\n await botMessageEvent.delete(delay=5)\n\n#Unwatch\n#Removes users from the specified group\n@client.command(aliases=['sold'])\nasync def unwatch(ctx):\n role = discord.utils.get(ctx.guild.roles, name=\"Stalnks\")\n user = ctx.message.author\n userMessage = ctx.message\n botMessage = user.mention + ', pleasure doin\\' business with ya!'\n await user.remove_roles(role)\n botMessageEvent = await ctx.send(botMessage)\n await userMessage.delete(delay=1)\n await botMessagEvent.delete(delay=5)\n\n#buying\n#Mentions the role with the price and the dodo code for travelling\n#PARAMETERS:\n # bellAmount: Price at Nook's Cranny on the island\n # dodoCode: Dodo Code from the Airport for travelling\n\n@client.command()\nasync def buying(ctx, bellAmount, dodoCode: str):\n authorUser = ctx.message.author\n mentionRole = discord.utils.get(ctx.guild.roles, name=\"Stalnks\")\n openRole = discord.utils.get(ctx.guild.roles, name=\"Open Town\")\n await authorUser.add_roles(openRole)\n if(bellAmount and dodoCode):\n await ctx.send(mentionRole.mention + ' ' + authorUser.mention + '\\'s island is buying turnips for ' + str(bellAmount) + ' bells!' 
+\n '\\n Their Dodo Code is: ' + dodoCode)\n\n#Stop selling\n#Edits the previous message to say that the island is closed\n@client.command(aliases=['stop'])\nasync def stop_selling(ctx):\n channel = ctx.message.channel\n authorUser = ctx.message.author\n openRole = discord.utils.get(ctx.guild.roles, name=\"Open Town\")\n #I have a feeling once a channel has a lot of messages this will become unwieldy\n async for m_id in channel.history().filter(lambda m: authorUser in m.mentions).filter(lambda m: 'island is' in m.content).map(lambda m:m):\n messageToEdit = m_id\n await authorUser.remove_roles(openRole)\n await messageToEdit.edit(content=authorUser.mention + ' has closed their town.')\n await messageToEdit.delete(delay=3600)\n #I have a feeling once a channel has a lot of messages this will become unwieldy\n async for m_id in channel.history().filter(lambda m: m.author == authorUser).filter(lambda m: '.selling' in m.content).map(lambda m:m):\n botSummonMessage = m_id\n await botSummonMessage.delete(delay=3)\n #I have a feeling once a channel has a lot of messages this will become unwieldy\n async for m_id in channel.history().filter(lambda m: m.author == authorUser).filter(lambda m: '.buying' in m.content).map(lambda m:m):\n botSummonMessage = m_id\n await botSummonMessage.delete(delay=3)\n await ctx.message.delete(delay=3)\n\n#Set Fruit role\n#Set the role in the server based on your Fruit\n#PARAMETERS:\n # Fruit: name of the fruit to get role\n@client.command()\nasync def fruit(ctx, fruit: str):\n fruit = fruit.lower()\n authorUser = ctx.message.author\n peachRole = discord.utils.get(ctx.guild.roles, name=\"just peachy\")\n orangeRole = discord.utils.get(ctx.guild.roles, name=\"orange you glad\")\n appleRole = discord.utils.get(ctx.guild.roles, name=\"apple a day...\")\n pearRole = discord.utils.get(ctx.guild.roles, name=\"thank you pear-y much\")\n cherryRole = discord.utils.get(ctx.guild.roles,name=\"so cherry sweet\")\n\n if(peachRole in authorUser.roles or orangeRole in authorUser.roles or appleRole in authorUser.roles or\n pearRole in authorUser.roles or cherryRole in authorUser.roles):\n await ctx.send(\"You already have a fruit!\")\n\n else:\n if(fruit == 'cherry'):\n await authorUser.add_roles(cherryRole)\n await ctx.send(\"You have set your fruit to Cherry!\")\n elif(fruit == 'orange'):\n await authorUser.add_roles(orangeRole)\n await ctx.send(\"You have set your fruit to Orange!\")\n elif(fruit == 'peach'):\n await authorUser.add_roles(peachRole)\n await ctx.send(\"You have set your fruit to Peach!\")\n elif(fruit == 'apple'):\n await authorUser.add_roles(appleRole)\n await ctx.send(\"You have set your fruit to Apple!\")\n elif(fruit == 'pear'):\n await authorUser.add_roles(pearRole)\n await ctx.send(\"You have set your fruit to Pear!\")\n\n#Help\n@client.command()\nasync def dmhelp(ctx):\n await ctx.send(\"Why hullo there! 
I'm Daisy Mae!\")\n await ctx.send(\"Need some help?\")\n\n await ctx.send(\"Prefix: '.'\\nAdd Fruit: **.fruit** [*cherry*,*orange*,*peach*,*apple*,*pear*]\\n\" +\n \"\\nAdd to Watchlist: **.bought**/**.watch**\\n\\nRemove from watchlist: **.sold**/**.unwatch**\\n\"+\n \"\\nAnnounce buying & being open: **.buying**\\n\\nClose town (use after .buying): **.stop**\")\n\nclient.run('')\n", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 5250, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "discord.ext.commands.Bot", "line_number": 4, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 4, "usage_type": "name"}, {"api_name": "discord.utils.get", "line_number": 16, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 16, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 29, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 29, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 47, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 47, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 48, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 48, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 60, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 60, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 85, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 85, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 86, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 86, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 87, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 87, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 88, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 88, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 89, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 89, "usage_type": "attribute"}]} {"seq_id": "282700517", "text": "import functools\n\nfrom flask import render_template, current_app, session as flask_session\nfrom ..db import Player\n\ndef redirect_page(title, message, redirect_url, redirect_delay=0):\n message = message.format(delay=redirect_delay)\n return render_template('redirect.html', title=title, message=message,\n url=redirect_url, delay=redirect_delay)\n\ndef inject_current_player(func):\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n with current_app.db.session_scope() as session:\n if 'username' in flask_session:\n current_player = session.query(Player). 
\\\n filter_by(name=flask_session['username']).one_or_none()\n else:\n current_player = None\n return func(session, current_player, *args, **kwargs)\n return wrapped\n", "sub_path": "telepict/flask_app/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 845, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "flask.render_template", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.current_app.db.session_scope", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.current_app.db", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 15, "usage_type": "name"}, {"api_name": "db.Player", "line_number": 16, "usage_type": "argument"}, {"api_name": "flask.session", "line_number": 17, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 12, "usage_type": "call"}]} {"seq_id": "344229013", "text": "import datetime as dt\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('train.csv')\n\nX_train = dataset.iloc[:, 1:13].values\nY_train = dataset.iloc[:,[13]].values\ndf_y_train= pd.DataFrame(Y_train)\ndf_x_train = pd.DataFrame(X_train)\n\n### binarize Y train column \nY_train =np.where(Y_train == 'correct',1,0)\n\n## handling missing values\nfrom sklearn.preprocessing import Imputer\nimputer_mode =Imputer(missing_values ='NaN', strategy ='most_frequent',axis =0)\nimputer_mode.fit(X_train[:,[0]])\nX_train[:,[0]]=imputer_mode.transform(X_train[:,[0]])\n\n\nimputer = Imputer(missing_values ='NaN', strategy ='most_frequent',axis =0)\nimputer.fit(X_train[:,[1,2,3,4,11]])\nX_train[:,[1,2,3,4,11]] = imputer.transform(X_train[:,[1,2,3,4,11]])\ndf = pd.DataFrame(X_train)\n\nxxc=pd.DataFrame(X_train)\nxxc[5]=pd.to_datetime(xxc[5])\nxxc[6]=pd.to_datetime(xxc[6])\nxxc[5]=xxc[5].map(dt.datetime.toordinal)\nxxc[6]=xxc[6].map(dt.datetime.toordinal)\nX_train[:,5]=xxc[5]\nX_train[:,6]=xxc[6]\n\n\n\n\n##DO the same for Test set\ntest_dataset =pd.read_csv('test.csv')\n\nX_test = test_dataset.iloc[:, 1:13].values\ndf_x_test = pd.DataFrame(X_test)\n\nxxc=pd.DataFrame(X_test)\nxxc[5]=pd.to_datetime(xxc[5])\nxxc[6]=pd.to_datetime(xxc[6])\nxxc[5]=xxc[5].map(dt.datetime.toordinal)\nxxc[6]=xxc[6].map(dt.datetime.toordinal)\nX_test[:,5]=xxc[5]\nX_test[:,6]=xxc[6]\n\n\nTest_ids = test_dataset.iloc[:,0:1]\n\nimputer_mode =Imputer(missing_values ='NaN', strategy ='most_frequent',axis =0)\nimputer_mode.fit(X_test[:,[0]])\nX_test[:,[0]]=imputer_mode.transform(X_test[:,[0]])\n\n\nimputer = Imputer(missing_values ='NaN', strategy ='most_frequent',axis =0)\nimputer.fit(X_test[:,[1,2,3,4,11]])\nX_test[:,[1,2,3,4,11]] = imputer.transform(X_test[:,[1,2,3,4,11]])\n\n\n## Now back to work................\n\nfrom sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train[:,[0,1,2,3,4,5,6,7,8,9,10,11]])\nX_test = scaler.fit_transform(X_test[:,[0,1,2,3,4,5,6,7,8,9,10,11]])\n\n\nfrom sklearn.linear_model import LogisticRegression\n\nclassifier = LogisticRegression(random_state=0)\nclassifier.fit(X_train,Y_train)\n\n#Import test values\n\ny_preds = classifier.predict(X_test)\nY_pred_dataframe = pd.DataFrame(y_preds)\nY_test_ids_dataframe = pd.DataFrame(Test_ids)\n\nsubmission =pd.concat([Y_test_ids_dataframe,Y_pred_dataframe],axis=1)\nsubmission =submission.rename(columns={\"tiripid\":\"tripid\"})\nsubmission 
=submission.rename(columns={0:\"prediction\"})\n\nsubmission.to_csv('submission.csv',index=False)\n\n\n\n\n\n\n", "sub_path": "logistic-regression/logisticmodel.py", "file_name": "logisticmodel.py", "file_ext": "py", "file_size_in_byte": 2502, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.Imputer", "line_number": 18, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.Imputer", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.Imputer", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.Imputer", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 70, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 83, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 84, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 86, "usage_type": "call"}]} {"seq_id": "543203093", "text": "import pytest\nimport arrays_5.sample_data as s\n\n\ndef test_sample_data():\n subset_length = 4\n input_set = [5, 10, 20, 4, 3, 5]\n result = s.sample_data(input_set, subset_length)\n\n if len(result) != subset_length:\n pytest.fail(f'sample_data() result length is {len(result)}, while expected {subset_length}')\n", "sub_path": "source/arrays_5/sample_data_test.py", "file_name": "sample_data_test.py", "file_ext": "py", "file_size_in_byte": 324, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "arrays_5.sample_data.sample_data", "line_number": 8, "usage_type": "call"}, {"api_name": "arrays_5.sample_data", "line_number": 8, "usage_type": "name"}, {"api_name": "pytest.fail", "line_number": 11, "usage_type": "call"}]} {"seq_id": "632928150", "text": "import logging\nimport random\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\n\n\nFORMAT = '%(asctime)-15s %(message)s'\nlogging.basicConfig(format=FORMAT, level=logging.INFO)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nlogger.propagate = True\n\n\nclass SGD_SVM(object):\n n = 0\n\n def __init__(self):\n self.w = None\n np.set_printoptions(precision=3)\n\n def calc_w(self, x, y, g=0.1, l=0.00001):\n return self._calc_w(self.w, x, y, g, l)\n\n def _calc_w(self, w, x_, y_, g=0.1, l=0.00001):\n v = y_ * np.dot(w, x_)\n return w - (g * l) * w + (0 if v 
> 1 else y_ * x_)\n\n    def Qsvm(self, x_, y_, l=0.00001):\n        return self._Qsvm(self.w, x_, y_, l)\n\n    def _Qsvm(self, w, x_, y_, l=0.0001):\n        v = y_ * np.dot(w, x_)\n        loss = l * np.linalg.norm(w)**2 + max(0, 1 - v)\n        grad = 0 if v > 1 else -y_ * x_\n        return loss, grad\n\n    def fw(self, x):\n        return self._fw(self.w, x)[0]\n\n    def _fw(self, w, x):\n        value = np.dot(x, w)\n        return np.sign(value), value\n\n    def score(self, matrix, categories):\n        n = 0; m = 0; v = []; s = []\n        for x_, y_ in zip(matrix, categories):\n            l, v_ = self._fw(self.w, x_)\n            v.append(v_), s.append(y_)\n            if l == y_:\n                m += 1\n            else:\n                pass\n                # logger.info(\"{} but it was {}\".format(l, y_))\n            n += 1\n\n        return m / float(n), s, v\n\n    def fit(self, matrix, categories):\n        i = random.choice(range(matrix.shape[0]))\n        self.w = self.calculate_w(matrix, categories, matrix[i].copy())\n        return self.w\n\n    def calculate_empirical_risk(self, x, y, w, g, l):\n        loss = 0.0; n = len(y)\n        for x_, y_ in zip(x, y):\n            loss += l * np.linalg.norm(w) ** 2 + max(0, 1 - y_ * np.dot(w, x_))\n        loss /= n\n        return loss\n\n    def calculate_w(self, x, y, w):\n\n        l = 0.001\n        g0 = 0.5; g = 0.0\n        n = len(y); t = 1.0; i = 0\n\n        old_p = int(100 * (t / n)); p = 0.9\n        delta = np.inf; loss0 = np.inf; loss = 0\n\n        # logger.info(\"{:>20}{:>20}{:>20}{:>20}\".format(\"LOSS\", \"DELTA\", \"ACTUAL LOSS\", \"DELTA 2\"))\n        data = []\n        while t < n: # loss0 == np.inf or delta > 0.000001 or t < n:\n            i = random.choice(range(n))\n            x_, y_ = x[i], y[i]\n\n            if (old_p != p and p % 10 == 0):\n                logger.info(\"Completion {}%\".format(p))\n                old_p = p\n            #actual_q = self._Qsvm(w, x_, y_, l)[0]\n            #loss = self.calculate_empirical_risk(x, y, w, g, l)\n            #delta2 = np.abs(actual_q - loss)\n            #delta = np.abs(loss0 - loss)\n            #loss0 = loss\n            #data.append( (loss, delta, actual_q, delta2) )\n            p = int(100 * (t / n))\n\n\n            g = g0 / (1 + l * g0 * t)\n            # g = 1 / t\n            grad = 0 if (y_ * np.dot(w, x_) > 1) else y_ * x_\n            w = w - (g * l) * w + g * grad\n            t += 1\n\n            # v = y_ * np.dot(w, x_)\n            # actual_q = self._Qsvm(w, x_, y_, l)[0]\n            # loss = self.calculate_empirical_risk(x, y, w, g, l)\n            # delta = np.abs(loss0 - loss)\n            # delta2 = np.abs(actual_q - loss)\n            # loss0 = loss\n            # logger.info(\"{:>20}{:>20}{:>20}{:>20}\".format(loss, delta, actual_q, delta2))\n\n        #loss = self.calculate_empirical_risk(x, y, w, g, l)\n        #delta2 = np.abs(actual_q - loss)\n        #delta = np.abs(loss0 - loss)\n        #logger.info(\"{:>20}{:>20}{:>20}{:>20}\".format(\"LOSS\", \"DELTA\", \"ACTUAL LOSS\", \"DELTA 2\"))\n        #for d in data:\n        #    logger.info(\"{:>20}{:>20}{:>20}{:>20}\".format(d[0], d[1], d[2], d[3]))\n        return w\n\n\nif __name__ == '__main__':\n    pass\n", "sub_path": "SGD_SVM_2.py", "file_name": "SGD_SVM_2.py", "file_ext": "py", "file_size_in_byte": 3807, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.set_printoptions", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 35, "usage_type": "attribute"}, {"api_name": 
"numpy.dot", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.sign", "line_number": 44, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 79, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 101, "usage_type": "call"}]} {"seq_id": "104357907", "text": "import pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.externals import joblib\nimport argparse\nimport os\n# ๅŠ ไธŠ48ๅฐๆ—ถๅ‰obs ไฟกๆฏ\n# ๅค„็† RAIN ๅ€ผ ๅŽป้™ค 35ไปฅไธŠๆ•ฐๅ€ผ\ntarget_list=['t2m','rh2m','w10m']\nfrom datetime import timedelta\nfrom datetime import datetime\ndef datelist(beginDate, endDate):\n date_l=[datetime.strftime(x,'%Y-%m-%d') for x in list(pd.date_range(start=beginDate, end=endDate))]\n return date_l\n\nif __name__=='__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--log-level', dest='log_level', default='info', type=str,\n help='Logging level.')\n parser.add_argument('--model_dir', dest='model_dir',\n default='../checkpoints/lgb',\n type=str)\n parser.add_argument('--data_dir', dest='data_dir',\n default='../data/testb7/merge', type=str)\n\n parser.add_argument('--dst_dir', dest='dst_dir',\n default='../result/lgb')\n parser.add_argument('--first_day', dest='first_day',\n default='20181028', type=str)\n parser.add_argument('--last_day', dest='last_day',\n default='20181103', type=str)\n\n opt = parser.parse_args()\n\n feature_columns = ['t2m_obs', 'rh2m_obs', 'w10m_obs', 'psur_obs', 'q2m_obs', 'u10m_obs',\n 'v10m_obs', 'RAIN_obs',\n 't2m_prophet', 'rh2m_prophet', 'w10m_prophet',\n 't2m_M', 'rh2m_M', 'w10m_M', 'hour_sin', 'hour_cos', 'month_sin', 'month_cos',\n 'psfc_M', 'q2m_M', 'u10m_M', 'v10m_M',\n 'SWD_M', 'GLW_M', 'HFX_M', 'RAIN_M', 'PBLH_M', 'TC975_M', 'TC925_M',\n 'TC850_M', 'TC700_M', 'TC500_M', 'wspd925_M', 'wspd850_M', 'wspd700_M', 'wspd500_M']\n if opt.model_dir.endswith('_q'):\n feature_columns = feature_columns + ['Q975_M', 'Q925_M', 'Q850_M', 'Q700_M', 'Q500_M', 'LH_M']\n\n history_num = 24\n\n begin_dates = datelist(pd.to_datetime(opt.first_day) - timedelta(days=2),\n pd.to_datetime(opt.last_day) - timedelta(days=2))\n dst_dates = datelist(opt.first_day, opt.last_day)\n end_dates = datelist(pd.to_datetime(opt.first_day) + timedelta(days=1),\n pd.to_datetime(opt.last_day) + timedelta(days=1))\n\n model_dir = opt.model_dir\n data_dir = opt.data_dir\n dst_dir = opt.dst_dir\n if not os.path.exists(dst_dir):\n os.mkdir(dst_dir)\n\n for begin_date,dst_date,end_date in zip(begin_dates,dst_dates,end_dates):\n end_date=end_date+' 12-00-00'\n whole_submit = None\n for i in range(90001, 90011):\n index_list = []\n for j in range(37):\n index_list.append(str(i) + '_' + '{:02d}'.format(j))\n index = pd.DataFrame(index_list, columns=['FORE_data'])\n\n results = []\n for feature_index in range(3):\n\n lgb_model = joblib.load(os.path.join(model_dir,'model_' + str(i) + '_feature_' + str(feature_index) + '.m'))\n\n\n data_pre = pd.read_csv(os.path.join(data_dir,'merged_' + str(i) + '.csv'), index_col=0)\n\n data_pre.index = pd.to_datetime(data_pre.index)\n data_pre['hour'] = data_pre.index.hour\n data_pre['month'] = data_pre.index.month\n 
hour_period = 24 / (2 * np.pi)\n data_pre['hour_cos'] = np.cos(data_pre.index.hour / hour_period)\n data_pre['hour_sin'] = np.sin(data_pre.index.hour / hour_period)\n\n month_period = 12 / (2 * np.pi)\n data_pre['month_cos'] = np.cos(data_pre.index.month / month_period)\n data_pre['month_sin'] = np.sin(data_pre.index.month / month_period)\n\n data_pre['u10m_obs']=data_pre['u10m_obs']/data_pre['w10m_obs']\n data_pre['v10m_obs']=data_pre['v10m_obs']/data_pre['w10m_obs']\n data_pre['u10m_M']=data_pre['u10m_M']/data_pre['w10m_M']\n data_pre['v10m_M']=data_pre['v10m_M']/data_pre['w10m_M']\n\n data_pre = pd.DataFrame(data_pre, columns=feature_columns)\n data_pre = data_pre[begin_date:end_date]\n for col in data_pre.columns:\n data_pre[col] = data_pre[col].fillna(data_pre[col].mean())\n\n pre_data_float = np.array(data_pre)\n # print(pre_data_float.shape)\n history = list(pre_data_float[24:48, feature_index])\n result = []\n for k in range(37):\n row_data = pre_data_float[48 + k:48 + k + 1, 8:]\n curr_history = np.array(history)[k:k + history_num]\n obs_48h_ago = pre_data_float[k:k + 1, :8]\n # print(row_data.shape)\n # print(curr_history.shape)\n curr_history = curr_history.reshape((1, -1))\n X = np.c_[row_data, curr_history, obs_48h_ago]\n #print(X.shape)\n y = lgb_model.predict(X)\n # print(y.shape)\n if k < 4:\n result.append(pre_data_float[48 + k, feature_index])\n history.append(pre_data_float[48 + k, feature_index])\n else:\n result.append(y[0])\n history.append(y[0])\n # print('result:', result)\n rmse = np.sqrt(mean_squared_error(pre_data_float[48:, feature_index], np.array(result)))\n rmse_M = np.sqrt(\n mean_squared_error(pre_data_float[48:, feature_index], pre_data_float[48:, feature_index +11]))\n results.append(result)\n\n print('rmse:', rmse)\n print('rmse_M:', rmse_M)\n print('score:', (rmse_M - rmse) / rmse_M)\n suffix=opt.model_dir.split('/')[-1]\n submit = pd.DataFrame(np.array(results).T, columns=['t2m_'+suffix, 'rh2m_'+suffix, 'w10m_'+suffix])\n submit = pd.concat([index, submit], axis=1)\n if whole_submit is None:\n whole_submit = submit\n else:\n whole_submit = pd.concat([whole_submit, submit], axis=0)\n\n whole_submit.to_csv(os.path.join(dst_dir,dst_date + '.csv'), index=False)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "gbm/gensubmit_local.py", "file_name": "gensubmit_local.py", "file_ext": "py", "file_size_in_byte": 6411, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "datetime.datetime.strftime", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "name"}, {"api_name": "pandas.date_range", "line_number": 13, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, 
"usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 71, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 79, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 85, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 119, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 120, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 121, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 128, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 129, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}]} {"seq_id": "248043379", "text": "from snownlp import SnowNLP\nimport jieba\nimport jieba.analyse\nimport sqlite3\nimport re\njieba.load_userdict(\"mydict.txt\")\n\n\nconn = sqlite3.connect(r'C:\\Users\\I321338\\PycharmProjects\\Trendar\\db.sqlite3')\ncursor = conn.cursor()\n\nf = open(r\"C:\\Users\\I321338\\PycharmProjects\\Trendar\\machine_learning\\test500.txt\",'r', encoding='utf-8')\n\ntext = f.readline()\nsentiment = [0,0,0,0,0,0,0,0,0,0]\nwhile text!=\"\":\n ###############################\n #ๅฆ‚ๆžœ่ฆๆŠŠๅฅๅญๆ นๆฎๆ ‡็‚น็ฌฆๅทๅˆ†ๅผ€ๅ†ๆฅๅˆคๆ–ญ๏ผŒๆŠŠไธ‹้ขไธ‰ๅฅ่ฏ่งฃๅผ€ๆณจ้‡Š\n #่ฟ˜่ฆๆŠŠSnownlp(text)็š„textๆ”นๆˆi๏ผŒr.write(text)็š„textๆ”นๆˆi\n ###############################\n\n # file = re.split(\"\\n|๏ผŒ|ใ€‚|๏ผ|\\ \" ,text)\n # for i in file:\n # if i != '':\n text_snow = SnowNLP(text)\n score = text_snow.sentiments\n sentiment[int(score*10)] = sentiment[int(score*10)]+1\n text = f.readline()\n\nprint(sentiment)\ncursor.execute(\"delete from dashboard_sentiment\")\ncursor.execute(\"update sqlite_sequence SET seq = 0 where name ='dashboard_sentiment'\")\nfor num in sentiment:\n # num = int(n)\n # print(num)\n cursor.execute(\" insert into dashboard_sentiment (number) values(?)\",(num,))\nconn.commit()", 
"sub_path": "Trendar/machine_learning/Sentiment.py", "file_name": "Sentiment.py", "file_ext": "py", "file_size_in_byte": 1217, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "jieba.load_userdict", "line_number": 6, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 9, "usage_type": "call"}, {"api_name": "snownlp.SnowNLP", "line_number": 25, "usage_type": "call"}]} {"seq_id": "13506255", "text": "# Copyright (c) 2016 Baidu, Inc. All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .layers import LayerOutput, mixed_layer, identity_projection, \\\n slope_intercept_layer\nfrom .attrs import is_compatible_with\nfrom .default_decorators import *\nimport activations as act\n\n__all__ = []\n\ndef register_unary_math_op(op_name, act):\n def op(input, name=None):\n return mixed_layer(input=[identity_projection(input=input)],\n name=name,\n act=act)\n op = wrap_name_default(op_name)(op)\n op.__doc__ = type(act).__doc__\n globals()[op_name] = op\n __all__.append(op_name)\n\nregister_unary_math_op('exp', act.ExpActivation())\nregister_unary_math_op('log', act.LogActivation())\nregister_unary_math_op('abs', act.AbsActivation())\nregister_unary_math_op('sigmoid', act.SigmoidActivation())\nregister_unary_math_op('tanh', act.TanhActivation())\nregister_unary_math_op('square', act.SquareActivation())\n\ndef add(layeroutput, other):\n if is_compatible_with(other, float):\n return slope_intercept_layer(input=layeroutput, intercept=other)\n assert isinstance(other, LayerOutput)\n return mixed_layer(input=[identity_projection(input=layeroutput),\n identity_projection(input=other)])\n\nLayerOutput.__radd__ = add\nLayerOutput.__add__ = add\n\ndef sub(layeroutput, other):\n if is_compatible_with(other, float):\n return slope_intercept_layer(input=layeroutput, intercept=other)\n assert isinstance(other, LayerOutput)\n neg = slope_intercept_layer(input=other, slope=-1.0)\n return mixed_layer(input=[identity_projection(input=layeroutput),\n identity_projection(input=neg)])\n\nLayerOutput.__sub__ = sub\n\ndef rsub(layeroutput, other):\n neg = slope_intercept_layer(input=layeroutput, slope=-1.0)\n return add(neg, other)\n\nLayerOutput.__rsub__ = rsub\n", "sub_path": "python/paddle/trainer_config_helpers/math.py", "file_name": "math.py", "file_ext": "py", "file_size_in_byte": 2405, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "layers.mixed_layer", "line_number": 25, "usage_type": "call"}, {"api_name": "layers.identity_projection", "line_number": 25, "usage_type": "call"}, {"api_name": "activations.ExpActivation", "line_number": 33, "usage_type": "call"}, {"api_name": "activations.LogActivation", "line_number": 34, "usage_type": "call"}, {"api_name": "activations.AbsActivation", "line_number": 35, "usage_type": "call"}, {"api_name": "activations.SigmoidActivation", "line_number": 36, "usage_type": "call"}, {"api_name": 
"activations.TanhActivation", "line_number": 37, "usage_type": "call"}, {"api_name": "activations.SquareActivation", "line_number": 38, "usage_type": "call"}, {"api_name": "attrs.is_compatible_with", "line_number": 41, "usage_type": "call"}, {"api_name": "layers.slope_intercept_layer", "line_number": 42, "usage_type": "call"}, {"api_name": "layers.LayerOutput", "line_number": 43, "usage_type": "argument"}, {"api_name": "layers.mixed_layer", "line_number": 44, "usage_type": "call"}, {"api_name": "layers.identity_projection", "line_number": 44, "usage_type": "call"}, {"api_name": "layers.identity_projection", "line_number": 45, "usage_type": "call"}, {"api_name": "layers.LayerOutput.__radd__", "line_number": 47, "usage_type": "attribute"}, {"api_name": "layers.LayerOutput", "line_number": 47, "usage_type": "name"}, {"api_name": "layers.LayerOutput.__add__", "line_number": 48, "usage_type": "attribute"}, {"api_name": "layers.LayerOutput", "line_number": 48, "usage_type": "name"}, {"api_name": "attrs.is_compatible_with", "line_number": 51, "usage_type": "call"}, {"api_name": "layers.slope_intercept_layer", "line_number": 52, "usage_type": "call"}, {"api_name": "layers.LayerOutput", "line_number": 53, "usage_type": "argument"}, {"api_name": "layers.slope_intercept_layer", "line_number": 54, "usage_type": "call"}, {"api_name": "layers.mixed_layer", "line_number": 55, "usage_type": "call"}, {"api_name": "layers.identity_projection", "line_number": 55, "usage_type": "call"}, {"api_name": "layers.identity_projection", "line_number": 56, "usage_type": "call"}, {"api_name": "layers.LayerOutput.__sub__", "line_number": 58, "usage_type": "attribute"}, {"api_name": "layers.LayerOutput", "line_number": 58, "usage_type": "name"}, {"api_name": "layers.slope_intercept_layer", "line_number": 61, "usage_type": "call"}, {"api_name": "layers.LayerOutput.__rsub__", "line_number": 64, "usage_type": "attribute"}, {"api_name": "layers.LayerOutput", "line_number": 64, "usage_type": "name"}]} {"seq_id": "13603448", "text": "from django.db import models\nfrom code_one.models import BankUser\nimport datetime\n\n\n# Create your models here.\nclass DebitCard(models.Model):\n card_number = models.CharField(max_length=16, unique=True)\n date_created = models.DateTimeField(default=datetime.datetime.now())\n expiration_date = models.DateTimeField()\n bankUser = models.ForeignKey(BankUser)\n holder_first = models.CharField(max_length=20)\n holder_mid = models.CharField(max_length=1)\n holder_last = models.CharField(max_length=20)\n\n def __str__(self):\n return self.card_number\n\n\nclass Account(models.Model):\n balance = models.IntegerField(default=0)\n name = models.CharField(max_length=20)\n created_at = models.DateTimeField(default=datetime.datetime.now())\n last_deposit = models.DateTimeField(null=True, blank=True)\n last_withdraw = models.DateTimeField(null=True, blank=True)\n bankUser = models.ForeignKey(BankUser)\n debit_card = models.OneToOneField(DebitCard, on_delete=models.CASCADE, blank=True)\n\n def __str__(self):\n return self.name\n", "sub_path": "models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1067, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.db.models.Model", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", 
"line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 11, "usage_type": "call"}, {"api_name": "code_one.models.BankUser", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 26, "usage_type": "call"}, {"api_name": "code_one.models.BankUser", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 27, "usage_type": "attribute"}]} {"seq_id": "369403935", "text": "import sys\nfrom collections import defaultdict\n\nmy_dict = defaultdict(lambda:0)\nmy_dict[\"eric\"] = 33\nprint(my_dict[\"eric\"])\nprint(my_dict[\"fred\"])\nfor foo, bar in sorted(my_dict.items()):\n\tprint(\"%S --> %r % (foo, bar)\")\n\n\n\nprint('hello world')\n\nmy_int = 4\nmy_float = 2.5\nmy_string = \"hello\"\n\nprint(\"string: %s\\tfloat: %f\\tint: %d\" % (my_string, my_float, my_int))\n\nmy_variable = 5\nif my_variable == 4:\n\tprint(\"my_variable is 4\")\nelse:\n\tprint(\"my_variable is not 4\")\nfor i in range(1, my_variable):\n\tprint(\"i == %d\" % (i))\n\nsentence = \"this is a pen\"\nwords = 
sentence.split(\" \")\nfor word in words:\n\n\n\tprint(word)\n\nprint(\" ||| \".join(words))\n\nimport sys\nmy_file = open(sys.argv[1], \"r\")\nfor line in my_file:\n\tline = line.strip()\nif len(line) !=0:\n\tprint(line)\n\nmy_file = open(sys.argv[1], \"r\")\n\nfor line in my_file:\n\tline = line.strip()\n\tif len(line) != 0:\n\t\tprint(line)\n\n\n\n\n\n\n\n", "sub_path": "arai/tutorial00/my-program.py", "file_name": "my-program.py", "file_ext": "py", "file_size_in_byte": 878, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "collections.defaultdict", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 39, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 45, "usage_type": "attribute"}]} {"seq_id": "330184897", "text": "import logging\nimport traceback\n\nfrom aiohttp import web\nfrom aiohttp.web_exceptions import HTTPError\n\nimport settings\nfrom core.utils import json_to_str\n\nlog = logging.getLogger(__name__)\n\n\ndef _form_error_response(\n user_message: str, status_code: int, internal_message: str = None\n):\n \"\"\"ะคัƒะฝะบั†ะธั ะดะปั ั„ะพั€ะผะธั€ะพะฒะฐะฝะธั ะพั‚ะฒะตั‚ะฐ ั ะพัˆะธะฑะบะฐะผะธ\n\n :param user_message:\n :param status_code:\n :param internal_message:\n :return:\n \"\"\"\n body = {\n \"errors\": [\n {\n \"message\": user_message,\n \"internal_message\": internal_message,\n }\n ]\n }\n return web.Response(\n body=json_to_str(body).encode(\"utf-8\"),\n content_type=\"application/json\",\n status=status_code\n )\n\n\nasync def request_middleware(app, handler):\n \"\"\"ะŸั€ะพัะปะพะนะบะฐ ะดะปั ะพั‚ะปะพะฒะฐ ะฑะฐะทะพะฒั‹ั… ะพัˆะธะฑะพะบ\n\n :param app:\n :param handler:\n :return:\n \"\"\"\n async def middleware(request):\n try:\n response = await handler(request)\n except HTTPError as e:\n response = _form_error_response(\n user_message=e.reason,\n status_code=e.status_code\n )\n except:\n log.exception(\"Unexpected exception\")\n response = _form_error_response(\n user_message=\"Sorry, the internal server error\",\n internal_message=traceback.format_exc(),\n status_code=500\n )\n\n log.info(f\"{request.method} {request.path}{request.query_string}\")\n response.headers.add(\"Access-Control-Allow-Origin\", settings.ACCESS_CONTROL_ALLOW_ORIGIN)\n response.headers.add(\"Access-Control-Allow-Headers\", settings.ACCESS_CONTROL_ALLOW_HEADERS)\n return response\n\n return middleware\n", "sub_path": "api/middlewares.py", "file_name": "middlewares.py", "file_ext": "py", "file_size_in_byte": 1839, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "aiohttp.web.Response", "line_number": 31, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 31, "usage_type": "name"}, {"api_name": "core.utils.json_to_str", "line_number": 32, "usage_type": "call"}, {"api_name": "aiohttp.web_exceptions.HTTPError", "line_number": 48, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 57, "usage_type": "call"}, {"api_name": "settings.ACCESS_CONTROL_ALLOW_ORIGIN", "line_number": 62, "usage_type": "attribute"}, {"api_name": "settings.ACCESS_CONTROL_ALLOW_HEADERS", "line_number": 63, "usage_type": "attribute"}]} {"seq_id": "449086432", "text": "from django.core.urlresolvers import reverse_lazy\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect\nfrom 
django.shortcuts import render\nfrom django.views import generic\n\nfrom .models import Article\nfrom .forms import ArticleForm\n\n\nclass BlogHomeListView(generic.ListView):\n template_name = \"blog/article_home.html\"\n model = Article\n\n\ndef article_detail(request, pk):\n article = Article.objects.get(id=pk)\n return render(request, \"blog/article.html\", context={'article': article})\n\n\ndef article_create(request):\n if request.method == \"POST\":\n form = ArticleForm(request.POST, request.FILES)\n\n if form.is_valid():\n try:\n curr_user = User.objects.get(username=request.user.username)\n except User.DoesNotExist:\n curr_user = User.objects.get(username='admin')\n\n instance = form.save(commit=False)\n instance.author = curr_user\n instance.save()\n return HttpResponseRedirect(form.instance.get_absolute_url())\n else:\n form = ArticleForm()\n\n return render(request, \"blog/article_create.html\", context={'form': form})\n\n\ndef article_delete(request, pk):\n success_url = reverse_lazy('blog:blog-home')\n article = Article.objects.get(id=pk)\n\n if request.method == \"POST\" and \"delete\" in request.POST:\n article.delete()\n return HttpResponseRedirect(success_url)\n else:\n return HttpResponseRedirect(article.get_absolute_url())\n\n\ndef article_update(request, pk):\n article = Article.objects.get(id=pk)\n\n if request.method == \"POST\":\n form = ArticleForm(request.POST, request.FILES, instance=article)\n\n if form.is_valid():\n try:\n user = User.objects.get(username=request.user.username)\n except User.DoesNotExist:\n user = User.objects.get(username='admin')\n instance = form.save(commit=False)\n instance.author = user\n instance.save()\n return HttpResponseRedirect(instance.get_absolute_url())\n else:\n form = ArticleForm(instance=article)\n\n return render(request, \"blog/article_update.html\", context={'form': form})\n\n", "sub_path": "blog/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2233, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.views.generic.ListView", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 11, "usage_type": "name"}, {"api_name": "models.Article", "line_number": 13, "usage_type": "name"}, {"api_name": "models.Article.objects.get", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Article.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.Article", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}, {"api_name": "forms.ArticleForm", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 27, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 27, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.DoesNotExist", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 28, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 29, "usage_type": "name"}, {"api_name": 
"django.http.HttpResponseRedirect", "line_number": 34, "usage_type": "call"}, {"api_name": "forms.ArticleForm", "line_number": 36, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 42, "usage_type": "call"}, {"api_name": "models.Article.objects.get", "line_number": 43, "usage_type": "call"}, {"api_name": "models.Article.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.Article", "line_number": 43, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 47, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 49, "usage_type": "call"}, {"api_name": "models.Article.objects.get", "line_number": 53, "usage_type": "call"}, {"api_name": "models.Article.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "models.Article", "line_number": 53, "usage_type": "name"}, {"api_name": "forms.ArticleForm", "line_number": 56, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 60, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 60, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.DoesNotExist", "line_number": 61, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 61, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 62, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 66, "usage_type": "call"}, {"api_name": "forms.ArticleForm", "line_number": 68, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 70, "usage_type": "call"}]} {"seq_id": "623336718", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('featured', '0003_auto_20150521_1436'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='AssociatedEvent',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('featured_type', models.CharField(max_length=50, verbose_name=b'featured type')),\n ('featured_url', models.CharField(max_length=50, verbose_name=b'featured url')),\n ('plugin', models.ForeignKey(related_name='featured_event', to='featured.FeaturedPluginModel')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.RemoveField(\n model_name='featuredpluginmodel',\n name='featured_type',\n ),\n migrations.RemoveField(\n model_name='featuredpluginmodel',\n name='featured_url',\n ),\n ]\n", "sub_path": "cmstest/apps/featured/migrations/0004_auto_20150521_1437.py", "file_name": "0004_auto_20150521_1437.py", "file_ext": "py", "file_size_in_byte": 1106, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", 
"line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.migrations.RemoveField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.migrations.RemoveField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 30, "usage_type": "name"}]} {"seq_id": "475056339", "text": "import win32com.client as win32\nimport os \nimport re\nimport pythoncom\nimport sys\nfrom collections import Counter\nfrom datetime import datetime \niter=sys.argv[1]\nstart=datetime.now()\nprint(\"--------------------------------------------------------------------------------------------------------\")\nprint(\"Document Name:\", iter)\nprint(\"CheckList Rule - 22: Probable Acronyms in Document not defined in Acronym and Definition Table.\")\nprint(\"Document Review Start Time:\", start,\"HH:MM:SS\")\nprint(\"--------------------------------------------------------------------------------------------------------\")\nprint(\"\\n\")\napp=[]\nif iter.endswith('.doc') or iter.endswith('.docx'): \n word1 = win32.Dispatch(\"Word.Application\")\n word1.Visible = True\n p = os.path.abspath(iter)\n word1.Documents.Open(p)\n sheet_1 = word1.ActiveDocument\n for para in sheet_1.Paragraphs:\n a=para.Range.Text.encode('ascii','ignore').decode()\n pattern = r'(?:[A-Z]\\.)+'\n t = re.findall('([A-Z]+)',a)\n for i in t:\n if len(i)>=2: #discarding single capital letters\n app.append(i)\n def get_table_count():\n return sheet_1.Tables.Count\n \n def count_table_rows(table):\n return table.Rows.Count\n \n def count_table_columns(table):\n return table.Columns.Count\n \n def get_headers():\n headers = sheet_1.Sections(1).Headers(1)\n shape_count = headers.Shapes.Count\n for shape_num in range(1, shape_count + 1):\n t_range = headers.Shapes(shape_num).TextFrame.TextRange\n text = t_range.Text\n page_num = t_range.Information(3) # 3 == wdActiveEndPageNumber\n yield text, page_num\n \n def get_table_text(table):\n col_count = count_table_columns(table)\n row_count = count_table_rows(table)\n \n for row in range(1, row_count + 1):\n row_data = []\n for col in range(1, col_count + 1):\n try:\n row_data.append(table.Cell(Row=row,Column=col).Range.Text.strip(chr(7) + chr(13)))\n \n except pythoncom.com_error as error:\n row_data.append(\"\")\n \n yield row_data\n \n def get_all_table_text():\n for table in get_tables():\n table_data = []\n for row_data in get_table_text(table):\n #for col_data in .get_table_text(table):\n #table_data1.append(col_data)\n table_data.append(row_data)\n yield table_data\n #yield table_data1\n \n def 
get_tables():\n for table in sheet_1.Tables:\n yield table\n \n def __del__():\n word1.Quit()\n try:\n res=[]\n res1=[]\n pp={}\n jj=[]\n jjj=[]\n jjjj=[]\n \n #path = str(input())\n #count=0\n #open_doc = os.path.abspath(path)\n for table_num, table_text in enumerate(get_all_table_text()):\n #print(\"\\n-------------- Table %s ----------------\" % (table_num + 1))\n for row_data in table_text:\n b=\", \".join(row_data)\n b=str(b).encode(\"utf-8\")\n #print(b)\n k=b\"Acronyms\"\n l=b\"Definition\"\n if k in b: \n #print(table_text)\n k=table_text[0]\n #print(k)\n m=k.index('Acronyms')\n #print(m)\n for i in table_text:\n res.append(i[m])\n print(\"----------------------------------------------------------------------------------------------------------------------\")\t\t\n print(\"Acronyms Defined:\\n\",res[1:])\n print(\"----------------------------------------------------------------------------------------------------------------------\")\n res1=res[1:]\n print(\"Probable Acronyms not defined:\\n\",set(app)-set(res1))\n pp=set(app)-set(res1)\n #if len(pp)==0:\n # print(\"\\nStatus:Pass\")\n #else:\n # print(\"\\nStatus:Fail\")\n except:\n pass \n\nend=datetime.now()\nprint(\"\\nDocument Review End Time:\", end)\nprint(\"\\nTime taken For Document Review:\", end-start,\"HH:MM:SS\") \nsheet_1.Close()\nword1.Quit() ", "sub_path": "Debug/setup/source1/acronotfound12/acronotfound12.py", "file_name": "acronotfound12.py", "file_ext": "py", "file_size_in_byte": 3862, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "name"}, {"api_name": "win32com.client.Dispatch", "line_number": 18, "usage_type": "call"}, {"api_name": "win32com.client", "line_number": 18, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 26, "usage_type": "call"}, {"api_name": "pythoncom.com_error", "line_number": 58, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 119, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 119, "usage_type": "name"}]} {"seq_id": "400480820", "text": "from django.db import models\nfrom django.conf import settings\n\nmodels_yaml = settings.MODELS_YAML\n\nmodels_dict = {}\n\ntypes = {\n 'date': models.DateField,\n 'char': models.TextField,\n 'int': models.IntegerField,\n}\n\ndef model_name(title):\n def function(self):\n return title\n return function\n\n\nfor m in models_yaml:\n class_fields = {}\n class_fields['__module__'] = __name__\n class_fields['__unicode__'] = model_name(models_yaml[m]['title'])\n for f in models_yaml[m]['fields']:\n class_fields[f['id']] = types[f['type']](f['title'])\n\n models_dict[m] = type(m, (models.Model,), class_fields)", "sub_path": "project/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 628, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.conf.settings.MODELS_YAML", "line_number": 4, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 4, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 9, "usage_type": 
"attribute"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}]} {"seq_id": "602185353", "text": "import operator\nimport os\nimport sys\nimport math\nfrom flask import Flask\nfrom flask_jsonrpc import JSONRPC\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))\n\nimport mongodb_client\n\nPREFERENCE_MODEL_TABLE_NAME = \"user_preference_model\"\n\nSERVER_HOST = 'localhost'\nSERVER_PORT = 5050\n\n# Flask application\napp = Flask(__name__)\n\n# Flask-JSONRPC\njsonrpc = JSONRPC(app, '/api', enable_web_browsable_api=True)\n\n\n@jsonrpc.method('getPreferenceForUser')\ndef getPreferenceForUser(user_id:str)->list:\n db = mongodb_client.get_db()\n model = db[PREFERENCE_MODEL_TABLE_NAME].find_one({'userId':user_id})\n print(model)\n if model is None:\n return []\n sorted_tuples = sorted(model['preference'].items(), key=operator.itemgetter(1), reverse=True)\n sorted_list = [x[0] for x in sorted_tuples]\n sorted_value_list = [x[1] for x in sorted_tuples]\n\t\n # If the first preference is close to the last one, return empty\n if math.isclose(float(sorted_value_list[0]), float(sorted_value_list[-1]), rel_tol=1e-5):\n return []\n print(sorted_list)\n return sorted_list\n\nif __name__ == '__main__':\n app.run(host=SERVER_HOST, port=SERVER_PORT, debug=True)", "sub_path": "news_recommendation_service/recommendation_service.py", "file_name": "recommendation_service.py", "file_ext": "py", "file_size_in_byte": 1193, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "sys.path.append", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 18, "usage_type": "call"}, {"api_name": "flask_jsonrpc.JSONRPC", "line_number": 21, "usage_type": "call"}, {"api_name": "mongodb_client.get_db", "line_number": 26, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 31, "usage_type": "call"}, {"api_name": "math.isclose", "line_number": 36, "usage_type": "call"}]} {"seq_id": "529069657", "text": "# -*- coding: utf-8 -*-\nimport time\nfrom odoo import api, models, _\nfrom odoo.exceptions import UserError\nfrom datetime import datetime\n\n\nclass ReportCostProfit(models.AbstractModel):\n\n _name = 'report.staffcommission_costprofit.report_cost_profit'\n \n def get_cost_profit_data(self, start_date, end_date, doctor, treatment_id):\n dom = [('date_invoice', '>=', start_date),\n ('date_invoice', '<=', end_date),\n ('is_patient', '=', True),\n ('type', '=', 'out_invoice'),\n ('state', 'in', ['open', 'paid'])]\n if doctor:\n dom.append(('dentist', '=', doctor[0]))\n history_ids = self.env['account.invoice'].search(dom)\n prod_dict = {}\n for invoice in history_ids:\n income = 0.0\n overhead_amt = 
0.0\n material_cost = 0.0\n if invoice:\n for line in invoice.invoice_line_ids:\n count = 0\n if line.product_id.is_treatment:\n if (treatment_id and line.product_id.id == treatment_id[0]) or not treatment_id:\n income = line.price_subtotal\n overhead_amt = line.product_id.lst_price * (line.product_id.overhead_cost/100)\n material_cost = 0.0\n for consumable in line.product_id.consumable_ids:\n material_cost += consumable.consu_product_id.lst_price * consumable.quantity\n if line.product_id.id in prod_dict:\n count = prod_dict[line.product_id.id][1] + line.quantity\n prod_dict[line.product_id.id][1] = count\n overhead_final = overhead_amt * line.quantity\n material_cost_final = material_cost * line.quantity\n prod_dict[line.product_id.id][2] += income\n prod_dict[line.product_id.id][3] += overhead_final\n prod_dict[line.product_id.id][4] += material_cost_final\n prod_dict[line.product_id.id][5] += income - material_cost_final - overhead_final\n else:\n overhead_final = overhead_amt * line.quantity\n material_cost_final = material_cost * line.quantity\n prod_dict[line.product_id.id] = [line.product_id.name, line.quantity,\n income, overhead_final, material_cost_final,\n income-material_cost_final -overhead_final]\n return [prod_dict]\n\n @api.multi\n def get_report_values(self, docids, data=None):\n if not data.get('form') or not self.env.context.get('active_model') or not self.env.context.get('active_id'):\n raise UserError(_(\"Form content is missing, this report cannot be printed.\"))\n model = self.env.context.get('active_model')\n docs = self.env[model].browse(self.env.context.get('active_id'))\n start_date = data['form']['date_start']\n end_date = data['form']['date_end']\n treatment_id = data['treatment_id']\n doctor = data['doctor']\n final_records = self.get_cost_profit_data(start_date, end_date, doctor, treatment_id)\n period_start = datetime.strptime(start_date, '%Y-%m-%d')\n period_stop = datetime.strptime(end_date, '%Y-%m-%d')\n return {\n 'period_start': period_start,\n 'period_stop': period_stop,\n 'doc_ids': self.ids,\n 'doc_model': 'cost.profit.wizard',\n 'data': data['form'],\n 'docs': docs,\n 'time': time,\n 'get_cost_profit_data': final_records,\n 'treatment_id': treatment_id,\n 'doctor': doctor,\n }\n \n def formatLang(self, value, digits=None, date=False, date_time=False, grouping=True, monetary=False, dp=False,\n currency_obj=False, lang=False):\n if lang:\n self.env.context['lang'] = lang\n return super(ReportCostProfit, self).formatLang(value, digits=digits, date=date, date_time=date_time,\n grouping=grouping, monetary=monetary, dp=dp,\n currency_obj=currency_obj)\n", "sub_path": "Medical_09122019/staffcommission_costprofit/report/report_cost_profit.py", "file_name": "report_cost_profit.py", "file_ext": "py", "file_size_in_byte": 4537, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "odoo.models.AbstractModel", "line_number": 8, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 8, "usage_type": "name"}, {"api_name": "odoo.exceptions.UserError", "line_number": 56, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 64, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 65, "usage_type": "name"}, 
{"api_name": "odoo.api.multi", "line_number": 53, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 53, "usage_type": "name"}]} {"seq_id": "427390101", "text": "try:\n import ameba\n import time\n import uio as io\n import array\n import uctypes\n from utils import fast_copy\nexcept:\n raise ImportError(\"ameba module import failed\")\n\nPERI_ON_BASE = const(0x40000000)\nREG_CPU_PERIPHERAL_CTRL = const(0x02C0)\nSPI_FLASH_BASE = const(0x98000000)\nSPI_FLASH_CTRL_BASE = const(0x40006000)\n\nREG_SPIC_CTRLR0 = const(0x0000)\nREG_SPIC_CTRLR1 = const(0x0004)\nREG_SPIC_SSIENR = const(0x0008)\nREG_SPIC_SR = const(0x0028)\nREG_SPIC_DR0 = const(0x0060)\nREG_SPIC_ADDR_LENGTH = const(0x0118)\n\nclass FlashBdev:\n START_ADDR = 0x100000\n SEC_SIZE = 0x1000 # most flash sector is 4K\n START_SEC = START_ADDR // SEC_SIZE\n NUM_BLK = 252\n def __init__(self, blocks=NUM_BLK):\n self.blocks = blocks\n self.cache_buffer = bytearray(self.SEC_SIZE)\n\n def cmd(self, cmd, data=None, rxbuf=None):\n if len(cmd) != 1:\n raise ValueError('cmd length should be 1 byte')\n\n tmod = 1 if rxbuf is None else 3\n\n # enable CPU's spiflash pin\n ameba.mem32[PERI_ON_BASE + REG_CPU_PERIPHERAL_CTRL] |= (1<<0)\n ameba.mem32[SPI_FLASH_CTRL_BASE + REG_SPIC_SSIENR] &= ~(1<<0)\n if tmod == 3:\n ameba.mem32[SPI_FLASH_CTRL_BASE + REG_SPIC_CTRLR1] = len(rxbuf)\n if data is not None:\n ameba.mem32[SPI_FLASH_CTRL_BASE + REG_SPIC_ADDR_LENGTH] = 0 if data is None else len(data)\n\n ctrlr0 = ameba.mem32[SPI_FLASH_CTRL_BASE + REG_SPIC_CTRLR0]\n ctrlr0 &= 0xFFC0FCFF\n ctrlr0 |= ((tmod << 8) | (3 << 20))\n ameba.mem32[SPI_FLASH_CTRL_BASE + REG_SPIC_CTRLR0] = ctrlr0\n\n ameba.mem8[SPI_FLASH_CTRL_BASE + REG_SPIC_DR0] = int.from_bytes(cmd, 1)\n\n if data is not None:\n for i in data:\n ameba.mem8[SPI_FLASH_CTRL_BASE + REG_SPIC_DR0] = i\n\n # activate this transfer\n ameba.mem32[SPI_FLASH_CTRL_BASE + REG_SPIC_SSIENR] |= (1<<0)\n\n counter = 1000000\n while counter:\n reg_sr = ameba.mem32[SPI_FLASH_CTRL_BASE + REG_SPIC_SR]\n busy = reg_sr & (1<<0)\n if busy == 0:\n break\n counter -= 1\n\n ameba.mem32[SPI_FLASH_CTRL_BASE + REG_SPIC_SSIENR] &= ~(1<<0)\n\n if rxbuf is not None:\n for i in range(len(rxbuf)):\n rxbuf[i] = ameba.mem8[SPI_FLASH_CTRL_BASE + REG_SPIC_DR0]\n\n ameba.mem32[SPI_FLASH_CTRL_BASE + REG_SPIC_SSIENR] &= ~(1<<0)\n # disable CPU's spiflash pin\n ameba.mem32[PERI_ON_BASE + REG_CPU_PERIPHERAL_CTRL] &= ~(1<<0)\n\n def WREN(self):\n self.cmd(b'\\x06')\n\n def WRDI(self):\n self.cmd(b'\\x04')\n\n def WRSR(self, status):\n self.cmd(b'\\x01', data=status)\n\n @property\n def RDSR(self):\n status = bytearray(1)\n self.cmd(b'\\x05', rxbuf=status)\n return int.from_bytes(status, 1)\n\n @property\n def WIP(self):\n status = self.RDSR\n return status & (1<<0)\n\n @property\n def RDID(self):\n buffer = bytearray(3)\n self.cmd(b'\\x9f', rxbuf=buffer)\n id = int.from_bytes(buffer, 3)\n manufactor_id = (id & 0xFF0000) >> 16\n device_id = id & 0xFFFF\n return (manufactor_id, device_id)\n\n def erasesector(self, addr):\n bytes_list = list(addr.to_bytes(3, 'little'))\n bytes_list.reverse()\n self.cmd(b'\\x20', data = bytearray(bytes_list))\n\n def eraseblock(self, addr):\n bytes_list = list(addr.to_bytes(3, 'little'))\n bytes_list.reverse()\n self.cmd(b'\\xd8', data = bytearray(bytes_list))\n\n def erasechip(self):\n self.cmd(b'\\x60')\n\n def readblocks(self, n, buf):\n flash_cpu_ctrl_address = PERI_ON_BASE + REG_CPU_PERIPHERAL_CTRL\n ameba.mem32[flash_cpu_ctrl_address] |= (1<<0)\n dst_addr = uctypes.addressof(buf)\n src_addr = 
SPI_FLASH_BASE + (n + self.START_SEC) * self.SEC_SIZE\n fast_copy(dst_addr, src_addr, self.SEC_SIZE)\n ameba.mem32[flash_cpu_ctrl_address] &= ~(1<<0)\n\n def writeblocks(self, n, buf):\n address_base = SPI_FLASH_BASE + (n + self.START_SEC) * self.SEC_SIZE\n spi_status_address = SPI_FLASH_CTRL_BASE + REG_SPIC_SR\n flash_cpu_ctrl_address = PERI_ON_BASE + REG_CPU_PERIPHERAL_CTRL\n\n ameba.mem32[PERI_ON_BASE + REG_CPU_PERIPHERAL_CTRL] |= (1<<0)\n self.WREN()\n self.erasesector((n + self.START_SEC) * self.SEC_SIZE)\n while self.WIP != 0:\n pass\n\n arr = array.array(\"L\", buf)\n\n for index, value in enumerate(arr):\n ameba.mem32[flash_cpu_ctrl_address] |= 0x01\n ameba.mem32[address_base + 4*index] = value\n\n ameba.mem32[PERI_ON_BASE + REG_CPU_PERIPHERAL_CTRL] &= ~(1<<0)\n\n def ioctl(self, op, arg):\n if op == 4: # BP_IOCTL_SEC_COUNT\n return self.blocks\n if op == 5: # BP_IOCTL_SEC_SIZE\n return self.SEC_SIZE\n", "sub_path": "ameba/modules/flashbdev.py", "file_name": "flashbdev.py", "file_ext": "py", "file_size_in_byte": 4855, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "ameba.mem32", "line_number": 39, "usage_type": "attribute"}, {"api_name": "ameba.mem32", "line_number": 40, "usage_type": "attribute"}, {"api_name": "ameba.mem32", "line_number": 42, "usage_type": "attribute"}, {"api_name": "ameba.mem32", "line_number": 44, "usage_type": "attribute"}, {"api_name": "ameba.mem32", "line_number": 46, "usage_type": "attribute"}, {"api_name": "ameba.mem32", "line_number": 49, "usage_type": "attribute"}, {"api_name": "ameba.mem8", "line_number": 51, "usage_type": "attribute"}, {"api_name": "ameba.mem8", "line_number": 55, "usage_type": "attribute"}, {"api_name": "ameba.mem32", "line_number": 58, "usage_type": "attribute"}, {"api_name": "ameba.mem32", "line_number": 62, "usage_type": "attribute"}, {"api_name": "ameba.mem32", "line_number": 68, "usage_type": "attribute"}, {"api_name": "ameba.mem8", "line_number": 72, "usage_type": "attribute"}, {"api_name": "ameba.mem32", "line_number": 74, "usage_type": "attribute"}, {"api_name": "ameba.mem32", "line_number": 76, "usage_type": "attribute"}, {"api_name": "ameba.mem32", "line_number": 122, "usage_type": "attribute"}, {"api_name": "uctypes.addressof", "line_number": 123, "usage_type": "call"}, {"api_name": "utils.fast_copy", "line_number": 125, "usage_type": "call"}, {"api_name": "ameba.mem32", "line_number": 126, "usage_type": "attribute"}, {"api_name": "ameba.mem32", "line_number": 133, "usage_type": "attribute"}, {"api_name": "array.array", "line_number": 139, "usage_type": "call"}, {"api_name": "ameba.mem32", "line_number": 142, "usage_type": "attribute"}, {"api_name": "ameba.mem32", "line_number": 143, "usage_type": "attribute"}, {"api_name": "ameba.mem32", "line_number": 145, "usage_type": "attribute"}]} {"seq_id": "501875344", "text": "import logging\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pickle as pkl\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nimport torch\nimport torch.utils.data as utils\n\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + \"/..\")\n\nfrom common.argparser import autoencoder_argparse\nfrom common.utility import log_shapes, log_time, torch_device,\\\n time_stp, load_processed_data, logger, sep\nfrom common.torchsummary import summary\n\nfrom models.autoencoder import AutoEncoderBasic\n\n\ndef main(\n model,\n 
time_stamp,\n device,\n ally_classes,\n advr_1_classes,\n advr_2_classes,\n encoding_dim,\n test_size,\n batch_size,\n n_epochs,\n shuffle,\n lr,\n expt,\n ):\n\n device = torch_device(device=device)\n\n # refer to PrivacyGAN_Titanic for data preparation\n X, y_ally, y_advr_1, y_advr_2 = load_processed_data(\n expt, 'processed_data_X_y_ally_y_advr_y_advr_2.pkl')\n log_shapes(\n [X, y_ally, y_advr_1, y_advr_2],\n locals(),\n 'Dataset loaded'\n )\n\n X_train, X_valid = train_test_split(\n X,\n test_size=test_size,\n stratify=pd.DataFrame(np.concatenate(\n (\n y_ally.reshape(-1, ally_classes),\n y_advr_1.reshape(-1, advr_1_classes),\n y_advr_2.reshape(-1, advr_2_classes),\n ), axis=1)\n )\n )\n\n log_shapes(\n [\n X_train, X_valid,\n ],\n locals(),\n 'Data size after train test split'\n )\n\n scaler = StandardScaler()\n X_train_normalized = scaler.fit_transform(X_train)\n X_valid_normalized = scaler.transform(X_valid)\n\n log_shapes([X_train_normalized, X_valid_normalized], locals())\n\n dataset_train = utils.TensorDataset(torch.Tensor(X_train_normalized))\n dataloader_train = torch.utils.data.DataLoader(\n dataset_train, batch_size=batch_size, shuffle=shuffle, num_workers=2)\n\n dataset_valid = utils.TensorDataset(torch.Tensor(X_valid_normalized))\n dataloader_valid = torch.utils.data.DataLoader(\n dataset_valid, batch_size=batch_size, shuffle=False, num_workers=2)\n\n auto_encoder = AutoEncoderBasic(\n input_size=X_train_normalized.shape[1],\n encoding_dim=encoding_dim\n ).to(device)\n\n criterion = torch.nn.MSELoss()\n adam_optim = torch.optim.Adam\n optimizer = adam_optim(auto_encoder.parameters(), lr=lr)\n\n summary(auto_encoder, input_size=(1, X_train_normalized.shape[1]))\n\n h_epoch = []\n h_valid = []\n h_train = []\n\n auto_encoder.train()\n\n sep()\n logging.info(\"epoch \\t Aencoder_train \\t Aencoder_valid\")\n\n for epoch in range(n_epochs):\n\n nsamples = 0\n iloss = 0\n for data in dataloader_train:\n optimizer.zero_grad()\n\n X_torch = data[0].to(device)\n X_torch_hat = auto_encoder(X_torch)\n loss = criterion(X_torch_hat, X_torch)\n loss.backward()\n optimizer.step()\n\n nsamples += 1\n iloss += loss.item()\n\n if epoch % int(n_epochs/10) != 0:\n continue\n\n h_epoch.append(epoch)\n h_train.append(iloss/nsamples)\n\n nsamples = 0\n iloss = 0\n for data in dataloader_valid:\n X_torch = data[0].to(device)\n X_torch_hat = auto_encoder(X_torch)\n loss = criterion(X_torch_hat, X_torch)\n\n nsamples += 1\n iloss += loss.item()\n h_valid.append(iloss/nsamples)\n\n logging.info('{} \\t {:.8f} \\t {:.8f}'.format(\n h_epoch[-1],\n h_train[-1],\n h_valid[-1],\n ))\n\n config_summary = 'device_{}_dim_{}_batch_{}_epochs_{}_lr_{}_tr_{:.4f}_val_{:.4f}'\\\n .format(\n device,\n encoding_dim,\n batch_size,\n n_epochs,\n lr,\n h_train[-1],\n h_valid[-1],\n )\n\n plt.plot(h_epoch, h_train, 'r--')\n plt.plot(h_epoch, h_valid, 'b--')\n plt.legend(['train_loss', 'valid_loss'])\n plt.title(\"autoencoder training {}\".format(config_summary))\n\n plot_location = 'plots/{}/{}_training_{}_{}.png'.format(\n expt, model, time_stamp, config_summary)\n sep()\n logging.info('Saving: {}'.format(plot_location))\n plt.savefig(plot_location)\n checkpoint_location = \\\n 'checkpoints/{}/{}_training_history_{}_{}.pkl'.format(\n expt, model, time_stamp, config_summary)\n logging.info('Saving: {}'.format(checkpoint_location))\n pkl.dump((h_epoch, h_train, h_valid), open(checkpoint_location, 'wb'))\n\n model_ckpt = 'checkpoints/{}/{}_torch_model_{}_{}.pkl'.format(\n expt, model, time_stamp, config_summary)\n 
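# Persist the full trained model object as well, so it can be reloaded later without retraining.\n 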
logging.info('Saving: {}'.format(model_ckpt))\n torch.save(auto_encoder, model_ckpt)\n\n\nif __name__ == \"__main__\":\n expt = 'mimic'\n model = 'autoencoder_basic'\n marker = 'A'\n pr_time, fl_time = time_stp()\n\n logger(expt, model, fl_time, 'A')\n\n log_time('Start', pr_time)\n args = autoencoder_argparse()\n main(\n model=model,\n time_stamp=fl_time,\n device=args['device'],\n ally_classes=int(args['n_ally']),\n advr_1_classes=int(args['n_advr_1']),\n advr_2_classes=int(args['n_advr_2']),\n encoding_dim=int(args['dim']),\n test_size=float(args['test_size']),\n batch_size=int(args['batch_size']),\n n_epochs=int(args['n_epochs']),\n shuffle=int(args['shuffle']) == 1,\n lr=float(args['lr']),\n expt=args['expt'],\n )\n log_time('End', time_stp()[0])\n sep()\n", "sub_path": "expt_mimic/autoencoder_training.py", "file_name": "autoencoder_training.py", "file_ext": "py", "file_size_in_byte": 5731, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "sys.path.append", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 13, "usage_type": "call"}, {"api_name": "common.utility.torch_device", "line_number": 39, "usage_type": "call"}, {"api_name": "common.utility.load_processed_data", "line_number": 42, "usage_type": "call"}, {"api_name": "common.utility.log_shapes", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 53, "usage_type": "call"}, {"api_name": "common.utility.log_shapes", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 70, "usage_type": "call"}, {"api_name": "common.utility.log_shapes", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 81, "usage_type": "attribute"}, {"api_name": "models.autoencoder.AutoEncoderBasic", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 90, "usage_type": "attribute"}, {"api_name": "common.torchsummary.summary", "line_number": 93, "usage_type": "call"}, {"api_name": "common.utility.sep", "line_number": 101, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 102, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 137, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 157, "usage_type": "name"}, {"api_name": "common.utility.sep", "line_number": 161, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 167, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 168, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 173, "usage_type": "call"}, {"api_name": "common.utility.time_stp", "line_number": 180, "usage_type": "call"}, {"api_name": "common.utility.logger", "line_number": 182, "usage_type": "call"}, {"api_name": "common.utility.log_time", "line_number": 184, "usage_type": "call"}, {"api_name": "common.argparser.autoencoder_argparse", "line_number": 185, "usage_type": "call"}, {"api_name": "common.utility.log_time", "line_number": 201, "usage_type": "call"}, {"api_name": "common.utility.time_stp", "line_number": 201, "usage_type": "call"}, {"api_name": "common.utility.sep", "line_number": 202, "usage_type": "call"}]} {"seq_id": "650395269", "text": "# ๅฏน content.txt ไธญ็š„ๆฏไธ€ๆกๆ•ฐๆฎ่ฟ›่กŒๅˆ†่ฏ\n# ๅˆ†่ฏ่ฟ‡ๅŽ็”จ ' ' ๆ‹ผๆŽฅ่ตทๆฅ๏ผŒ่ฟ˜ๆ˜ฏไฝœไธบไธ€ๆกๆ•ฐๆฎๅญ˜ๆ”พๅœจ content-split-timestamp.txt ๆ–‡ไปถไธญ\nimport time\nimport multiprocessing\nimport jieba\nimport codecs\n\nif __name__ == '__main__':\n curTimeStr = str(int(time.time()))\n stopwords = []\n\n with open('./stop-words.txt', 'r') as f:\n stopwords = [word.strip() for word in f.readlines()]\n f.close()\n\n # ๅŽป้™คๅœ็”จ่ฏ\n def remove_stopwords(ls):\n return [word for word in ls if word not in stopwords]\n\n # ไปฅๅ†™็š„ๆ–นๅผๆ‰“ๅผ€ๅŽŸๅง‹็š„็ฎ€ไฝ“ไธญๆ–‡่ฏญๆ–™ๅบ“\n source_f = codecs.open('./content.txt', 'r', encoding='utf8')\n # ๅฐ†ๅˆ†ๅฎŒ่ฏ็š„่ฏญๆ–™ๅ†™ๅ…ฅๅˆฐ content-split-timestamp.txt ๆ–‡ไปถไธญ\n target_f = codecs.open('./content-split-' + curTimeStr + '.txt', 'w', encoding='utf8')\n\n line = source_f.readline()\n\n # ่ฎพ็ฝฎ jieba ๅˆ†่ฏๆ“ไฝœไธบๅนถ่กŒ\n # parallel ๆ•ฐ้‡ไธบ cpu_count\n jieba.enable_parallel(multiprocessing.cpu_count())\n # jieba ๅŠ ่ฝฝ่‡ชๅฎšไน‰่ฏๅ…ธ\n # ็›ฎๅ‰่‡ชๅฎšไน‰่ฏๅ…ธๆฅๆบ\n # https://github.com/qbox/www/tree/2020/front/2020/constants ไธญ็š„ products && solutions ็›ฎๅฝ•\n jieba.load_userdict('./user-dict.txt')\n\n while line:\n # ๆŽ’้™ค้ฆ–ๅฐพ็ฉบๆ ผ && ๆข่กŒ\n # ็ปŸไธ€่ฝฌๆขไธบๅฐๅ†™\n line = line.strip().lower()\n line_seg = ' '.join(remove_stopwords([word for word in jieba.cut(line)]))\n target_f.writelines(line_seg + '\\n')\n\n line = source_f.readline()\n\n # ๅ…ณ้—ญไธคไธชๆ–‡ไปถๆต\n source_f.close()\n target_f.close()\n", "sub_path": "python-demos/word-vector/split.py", "file_name": "split.py", "file_ext": "py", "file_size_in_byte": 1496, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "time.time", "line_number": 9, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 21, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 23, "usage_type": "call"}, {"api_name": "jieba.enable_parallel", "line_number": 29, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 29, "usage_type": "call"}, {"api_name": "jieba.load_userdict", "line_number": 33, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 39, "usage_type": "call"}]} {"seq_id": "290113343", "text": "import pandas\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, precision_recall_curve\nimport numpy as np\n\ndata = pandas.read_csv('data_seta/scores.csv')\n\ny_true = data['true'].to_numpy()\ny_pred = data['pred'].to_numpy()\n\nTP = 0\nTN = 0\nFP = 0\nFN = 0\n\nfor i in range(0, len(y_true)):\n if y_true[i] == 1 and y_pred[i] == 1:\n TP += 1\n if y_true[i] == 0 and y_pred[i] == 1:\n FP += 1\n if y_true[i] == 1 and y_pred[i] == 0:\n FN += 1\n if y_true[i] ==0 and y_pred[i] == 0:\n TN += 1\n\naccuracy = accuracy_score(y_true, y_pred)\nprecision = precision_score(y_true, y_pred)\nrecall = recall_score(y_true, y_pred)\nf = f1_score(y_true, y_pred)\n\nclassifiers = pandas.read_csv('data_seta/classification.csv')\n\ny1_true = classifiers['true'].to_numpy()\nscore_logreg = classifiers['score_logreg'].to_numpy()\nscore_svm = classifiers['score_svm'].to_numpy()\nscore_knn = classifiers['score_knn'].to_numpy()\nscore_tree = classifiers['score_tree'].to_numpy()\n\nlogreg = roc_auc_score(y1_true, score_logreg)\nsvm = roc_auc_score(y1_true, score_svm)\nknn = roc_auc_score(y1_true, score_knn)\ntree = roc_auc_score(y1_true, score_tree)\n\nprc_logred = precision_recall_curve(y1_true, score_logreg)\nprc_svm = precision_recall_curve(y1_true, score_svm)\nprc_knn = precision_recall_curve(y1_true, score_knn)\nprc_tree = precision_recall_curve(y1_true, score_tree)\n\ndef find_biggest_precision_when_recall(array):\n precision, recall, thresholds = array\n\n recall_indx = np.where(recall > 0.7)\n filtered_precision = precision[recall_indx]\n\n return round(max(filtered_precision), 2)\n\n\nprint(find_biggest_precision_when_recall(prc_logred))\nprint(find_biggest_precision_when_recall(prc_svm))\nprint(find_biggest_precision_when_recall(prc_knn))\nprint(find_biggest_precision_when_recall(prc_tree))", "sub_path": "metrics.py", "file_name": "metrics.py", "file_ext": "py", "file_size_in_byte": 1848, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 39, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 41, "usage_type": "call"}, {"api_name": 
"sklearn.metrics.precision_recall_curve", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_curve", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_curve", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_curve", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 51, "usage_type": "call"}]} {"seq_id": "135321161", "text": "import os\nimport packaging.version\n\nfrom testconfig import config\n\nfrom unittest import skip\n\nfrom tests.integration.core.constants import UPDATE_TEST_TIMEOUT\nfrom tests.integration.installation_and_upgrade.test_installation_and_upgrade import TestInstallationAndUpgrade\n\n\nclass TestYumUpdate(TestInstallationAndUpgrade):\n def test_yum_update(self):\n \"\"\" Test for lustre kernel is set to boot after yum update\"\"\"\n self.assertGreaterEqual(len(config[\"lustre_servers\"]), 4)\n\n # get a list of hosts\n response = self.chroma_manager.get(\"/api/host/\")\n self.assertEqual(response.successful, True, response.text)\n hosts = response.json[\"objects\"]\n self.assertEqual(len(hosts), len(self.config_servers))\n\n if packaging.version.parse(os.environ[\"UPGRADE_FROM_VER\"]) > packaging.version.parse(\"4.1\"):\n # Ensure that IML notices its storage servers needs upgraded\n for host in hosts:\n # wait for an upgrade available alert\n self.wait_for_assert(lambda: self.assertHasAlert(host[\"resource_uri\"], of_type=\"UpdatesAvailableAlert\"))\n alerts = self.get_list(\"/api/alert/\", {\"active\": True, \"alert_type\": \"UpdatesAvailableAlert\"})\n\n # Should be the only alert\n # FIXME HYD-2101 have to filter these alerts to avoid spurious ones. Once that\n # ticket is fixed, remove the filter so that we are checking that this really is\n # the only alert systemwide as it should be.\n alerts = [a for a in alerts if a[\"alert_item\"] == host[\"resource_uri\"]]\n self.assertEqual(len(alerts), 1)\n\n # Should be an 'updates needed' alert\n self.assertRegexpMatches(alerts[0][\"message\"], \"Updates are ready.*\")\n\n # The needs_update flag should be set on the host\n self.assertEqual(self.get_json_by_uri(host[\"resource_uri\"])[\"needs_update\"], True)\n\n self.assertEqual(len(hosts), len(self.config_servers))\n\n # Even though we have to stop a filesytem to do an upgrade (i.e. 
no\n # rolling upgrades, we stopped it before doing the upgrade to avoid\n # situations where the O/S upgrade results in an IML that can no\n # longer function with the upgraded O/S.\n\n # Run a yum upgrade while excluding python2 packages\n for server in self.config_servers:\n self.remote_operations.yum_upgrade_exclude_python2_iml(server)\n\n # With the list of hosts, start the upgrade as a single command\n command = self.chroma_manager.post(\n \"/api/command/\",\n body={\n \"jobs\": [{\"class_name\": \"UpdateJob\", \"args\": {\"host_id\": host[\"id\"]}} for host in hosts],\n \"message\": \"Test update of hosts\",\n },\n ).json\n\n # doing updates can include a reboot of the storage server,\n # and perhaps RHN/proxy slowness, so give it some extra time\n # Also note that IML is internally updating nodes in the same\n # HA pair in serial, so the timeout needs to be 2x.\n self.wait_for_command(self.chroma_manager, command[\"id\"], timeout=UPDATE_TEST_TIMEOUT)\n self.wait_for_assert(lambda: self.assertNoAlerts(host[\"resource_uri\"], of_type=\"UpdatesAvailableAlert\"))\n\n for server in self.config_servers:\n kernel = self.remote_operations.default_boot_kernel_path(server)\n self.assertGreaterEqual(kernel.find(\"_lustre\"), 7)\n available_updates = self.remote_operations.yum_check_update(server)\n available_updates = filter(lambda x: \"iml-\" in x, available_updates)\n self.assertEqual(len(available_updates), 0)\n\n # Update corosync on the storage servers\n # N.B. This will also start the FS\n for server in self.config_servers:\n self.remote_command(server[\"fqdn\"], \"chroma-agent convert_targets\")\n\n # Start the filesystem back up\n filesystem = self.get_filesystem_by_name(self.fs_name)\n self.start_filesystem(filesystem[\"id\"])\n\n @skip(\"Repos can't really be retired until at least an n+1 release\")\n def test_no_retired_repos(self):\n \"Test that no retired repos exist after an upgrade\"\n # TODO: this really needs to be more dynamic than using\n # repos that would have been retired many re-\n # leases ago\n retired_repos = [\"robinhood\"]\n current_repos = self.remote_operations.get_chroma_repos()\n for repo in retired_repos:\n self.assertFalse(repo in current_repos, \"Unexpectedly found repo '%s' in %s\" % (repo, current_repos))\n\n def test_obsolete_chroma_diagnostics(self):\n \"\"\"Test that chroma-diagnostics has been obsoleted\"\"\"\n addresses = [server[\"fqdn\"] for server in self.config_servers]\n addresses.append(config[\"chroma_managers\"][0][\"address\"])\n\n for address in addresses:\n chroma_diagnostics_result = self.remote_command(address, \"chroma-diagnostics\")\n self.assertEqual(\n chroma_diagnostics_result.stdout.split(\"\\n\")[0],\n \"chroma-diagnostics no longer exists. Please use 'iml-diagnostics' instead.\",\n )\n\n yum_installed_result = self.remote_command(address, \"rpm -q chroma-diagnostics\", expected_return_code=1)\n self.assertNotEqual(yum_installed_result.exit_status, 0)\n\n # something we can run to clear the storage targets since this\n # test class doesn't use setUp()\n def test_clean_linux_devices(self):\n self.cleanup_linux_devices([s for s in self.TEST_SERVERS if \"worker\" not in s.get(\"profile\", \"\")])\n\n def test_stop_before_update(self):\n # Stop the filesystem. 
Currently the GUI forces you to stop the filesystem before\n # the buttons to install updates are available, as we don't do any kind of \"rolling upgrade\".\n filesystem = self.get_filesystem_by_name(self.fs_name)\n if filesystem[\"state\"] != \"stopped\":\n self.stop_filesystem(filesystem[\"id\"])\n", "sub_path": "tests/integration/installation_and_upgrade/test_update_with_yum.py", "file_name": "test_update_with_yum.py", "file_ext": "py", "file_size_in_byte": 6076, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "tests.integration.installation_and_upgrade.test_installation_and_upgrade.TestInstallationAndUpgrade", "line_number": 12, "usage_type": "name"}, {"api_name": "testconfig.config", "line_number": 15, "usage_type": "name"}, {"api_name": "packaging.version.version.parse", "line_number": 23, "usage_type": "call"}, {"api_name": "packaging.version.version", "line_number": 23, "usage_type": "attribute"}, {"api_name": "packaging.version", "line_number": 23, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tests.integration.core.constants.UPDATE_TEST_TIMEOUT", "line_number": 67, "usage_type": "name"}, {"api_name": "unittest.skip", "line_number": 86, "usage_type": "call"}, {"api_name": "testconfig.config", "line_number": 100, "usage_type": "name"}]} {"seq_id": "64936563", "text": "import os\nimport time\nimport shutil\nimport pickle\n\nfrom common.entity.entities import Command, Node, Type\nfrom common.network.utils import get_ip_address\nfrom common.network.utils import changeIp\nfrom sftp import download_from_ftp\n\n\nclass BBB():\n\n def __init__(self, path, rc_local_dest_path, sfp_server_addr, sftp_port: int = 22,\n ftp_destination_folder: str = None):\n\n # Parameters that define absolute locations inside the Beaglebone\n self.ftpDestinationFolder = ftp_destination_folder\n self.rcLocalDestPath = rc_local_dest_path\n self.configPath = path\n self.sfp_server_addr = sfp_server_addr\n self.sftp_port = sftp_port\n\n # The interface used (ethernet port on the Beaglebone).\n self.interfaceName = 'eth0'\n\n # My configs !\n self.node = Node()\n self.node.type = Type()\n self.node.ipAddress = get_ip_address(self.interfaceName)\n\n # Load the data from the cfg file.\n self.readParameters()\n\n def getInfo(self):\n info = \"{}|{}|{}|{}|{}\" \\\n .format(Command.PING, self.node.name, self.node.type.name, self.node.ipAddress, self.node.type.sha)\n return info\n\n def reboot(self):\n os.system('reboot')\n\n def update_project(self):\n \"\"\"\n Update the project, getting the current version available on the FTP server.\n The responsibility to keep it up to date is all on the server side.\n Always removing the old ones.\n :return: True or False\n \"\"\"\n if self.node.type is None:\n return False\n try:\n repo_name = self.node.type.repoUrl.strip().split('/')[-1].split('.')[0]\n\n repo_dir = self.ftpDestinationFolder + repo_name + \"/\"\n if os.path.exists(repo_dir) and os.path.isdir(repo_dir):\n shutil.rmtree(repo_dir)\n time.sleep(1)\n\n if repo_dir.endswith('/') and self.node.rcLocalPath.startswith('/'):\n self.node.rcLocalPath = self.node.rcLocalPath[1:]\n\n download_from_ftp(sftp_server_addr=self.sfp_server_addr, sftp_port=self.sftp_port, path=repo_name,\n destination=repo_dir)\n\n print(\"Downloaded Node repository from FTP server {} at {}\".format(self.node.type.repoUrl, repo_name))\n\n if not os.path.isfile(repo_dir + self.node.rcLocalPath):\n shutil.rmtree(repo_dir)\n raise 
Exception(\"rc.local not found on path.\")\n\n shutil.copy2((repo_dir + self.node.rcLocalPath), self.rcLocalDestPath)\n print(\"Copied file {} to {}\".format(repo_dir + self.node.rcLocalPath, self.rcLocalDestPath))\n return True\n except Exception as e:\n print(\"{}\".format(e))\n return False\n\n def update(self, desiredNode: Node = None):\n \"\"\"\n @todo : NODE! Update Code !!!!\n Update the bbb with new data and refresh the project.\n :return:\n \"\"\"\n if not desiredNode or not desiredNode.type:\n print('Node/Type is None !')\n return\n if not desiredNode.ipAddress:\n print('IP Address is None !')\n return\n try:\n self.node = desiredNode\n\n res, msg = changeIp(interface_name=self.interfaceName, desired_ip=desiredNode.ipAddress)\n if res:\n self.node.ipAddress = get_ip_address(self.interfaceName)\n print(msg)\n\n with open(\"/etc/hostname\", \"w\") as hostnameFile:\n hostnameFile.write(desiredNode.name.replace(\":\", \"-\"))\n hostnameFile.close()\n\n self.update_project()\n\n self.writeNodeConfig(pickle.dumps(self.node))\n except Exception as e:\n print('Update Error ! {}'.format(e))\n\n def readParameters(self):\n self.readNodeConfig()\n try:\n name = os.popen(\"hostname\", \"r\").readline()[:-1]\n indexes = [i for i, letter in enumerate(name) if letter == \"-\"]\n name = list(name)\n if len(indexes) > 2:\n name[indexes[1]] = \":\"\n\n self.node.name = \"\".join(name)\n except FileNotFoundError:\n self.node.name = \"error-hostname-not-found\"\n\n def readNodeConfig(self):\n try:\n with open(self.configPath, 'rb') as file:\n self.node = pickle.load(file)\n except Exception as e:\n print('Read node config exception {}'.format(e))\n\n def writeNodeConfig(self, data):\n with open(self.configPath, 'wb+') as file:\n file.write(data)\n", "sub_path": "daemon/bbb.py", "file_name": "bbb.py", "file_ext": "py", "file_size_in_byte": 4619, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "common.entity.entities.Node", "line_number": 28, "usage_type": "call"}, {"api_name": "common.entity.entities.Type", "line_number": 29, "usage_type": "call"}, {"api_name": "common.network.utils.get_ip_address", "line_number": 30, "usage_type": "call"}, {"api_name": "common.entity.entities.Command.PING", "line_number": 37, "usage_type": "attribute"}, {"api_name": "common.entity.entities.Command", "line_number": 37, "usage_type": "name"}, {"api_name": "os.system", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 56, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 57, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 58, "usage_type": "call"}, {"api_name": "sftp.download_from_ftp", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 69, "usage_type": "call"}, {"api_name": "shutil.copy2", "line_number": 72, "usage_type": "call"}, {"api_name": "common.entity.entities.Node", "line_number": 79, "usage_type": "name"}, {"api_name": "common.network.utils.changeIp", "line_number": 94, "usage_type": "call"}, {"api_name": "common.network.utils.get_ip_address", "line_number": 96, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 105, "usage_type": 
"call"}, {"api_name": "os.popen", "line_number": 112, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 125, "usage_type": "call"}]} {"seq_id": "145181554", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n File name: main.py\n Location: Bioinformatics armory\n Project: Rosalind.info\n Author: Alexander Popp\n Date created: 08/26/2018\n Date last modified: 8/26/2018\n Python version: 3.6.5\n Description:\n DBPR: Given the uniprot ID of a protein, return a list of biological\n processes in which the protein is involved (biological processes are\n found in a subsection of the protein's 'Gene Ontology' (GO) section).\n\"\"\"\n\n\nfrom Bio import ExPASy\nfrom Bio import SwissProt\n\n\n# Read input file\ndef read_file(file_name):\n with open(file_name, 'r') as f:\n return f.readline()\n\n\n# Write output file\ndef write_file(file_name, data):\n with open(file_name, 'w') as f:\n for item in data:\n f.write(str(item) + \"\\n\")\n\n\n# Returns list of biological processes\ndef get_processes(prot_id):\n handle = ExPASy.get_sprot_raw(prot_id)\n record = SwissProt.read(handle)\n repos = record.cross_references\n result = []\n\n for i in range(len(repos)):\n if 'GO' in repos[i]:\n if 'P:' in repos[i][2]:\n result.append(repos[i][2][2::])\n result = map(str, result)\n return list(result)\n\n\ndef main():\n result = get_processes(read_file('rosalind_dbpr.txt'))\n write_file('output_dbpr.txt', result)\n\n\nif __name__ == \"__main__\":\n main()\n \n", "sub_path": "Bioinformatics_armory/02-DBPR/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1383, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "Bio.ExPASy.get_sprot_raw", "line_number": 38, "usage_type": "call"}, {"api_name": "Bio.ExPASy", "line_number": 38, "usage_type": "name"}, {"api_name": "Bio.SwissProt.read", "line_number": 39, "usage_type": "call"}, {"api_name": "Bio.SwissProt", "line_number": 39, "usage_type": "name"}]} {"seq_id": "412309818", "text": "from __future__ import print_function\n\nimport sys\nimport cdp.cdp_parameter\n\n\nclass ACMEParameter(cdp.cdp_parameter.CDPParameter):\n def __init__(self):\n self.case_id = ''\n self.reference_data_path = ''\n self.test_data_path = ''\n self.viewer_descr = {}\n\n self.sets = []\n self.dataset = ''\n self.run_type = 'model_vs_obs'\n self.variables = []\n self.seasons = ['ANN', 'DJF', 'MAM', 'JJA', 'SON']\n self.regions = ['global']\n self.regrid_tool = 'esmf'\n self.regrid_method = 'conservative'\n self.plevs = []\n\n # Plotting related\n self.main_title = ''\n # self.backend = 'vcs' # No default backend for now, user needs to specify which one\n self.save_netcdf = False\n self.output_format = ['png']\n self.output_format_subplot = []\n self.canvas_size_w = 1212\n self.canvas_size_h = 1628\n self.figsize = [8.5, 11.0]\n self.dpi = 150\n self.arrows = True\n self.logo = False\n\n self.contour_levels = [] # used both in test and reference\n self.test_name = ''\n self.short_test_name = ''\n self.test_title = ''\n # self.test_colormap = 'viridis'\n self.test_colormap = 'cet_rainbow.rgb'\n self.test_units = ''\n\n self.reference_name = ''\n self.reference_title = ''\n # self.reference_colormap = 'viridis'\n self.reference_colormap = 'cet_rainbow.rgb'\n self.reference_units = ''\n\n self.diff_name = ''\n self.diff_title = 'Model - Observation'\n # self.diff_colormap = 'cet_diverging_bwr_55_98_c37'\n self.diff_colormap = 'diverging_bwr.rgb'\n self.diff_levels = []\n 
self.diff_units = ''\n\n self.multiprocessing = False\n self.distributed = False\n self.num_workers = 4\n\n self.no_viewer = False\n self.debug = False\n\n self.granulate = ['variables', 'seasons', 'regions', 'plevs']\n\n def check_values(self):\n if not hasattr(\n self, 'reference_data_path') or self.reference_data_path == '':\n print('You need to specify reference_data_path in the parameters file or in the command line using --reference_data_path')\n sys.exit()\n if not hasattr(self, 'test_data_path') or self.test_data_path == '':\n print('You need to specify test_data_path in the parameters file or in the command line using --test_data_path')\n sys.exit()\n if hasattr(self, 'multiprocessing') and hasattr(\n self, 'distributed') and self.multiprocessing and self.distributed:\n print(\"Why are you trying to run the diags multiprocessed and distributedly? You can't do this, only choose one or none.\")\n sys.exit()\n if not hasattr(self, 'backend'):\n print(\"You need to define the 'backend' parameter to 'vcs' or 'mpl'/'matplotlib'/'cartopy'.\")\n sys.exit()\n", "sub_path": "acme_diags/acme_parameter.py", "file_name": "acme_parameter.py", "file_ext": "py", "file_size_in_byte": 2972, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "cdp.cdp_parameter.cdp_parameter", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cdp.cdp_parameter", "line_number": 7, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 71, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 74, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 78, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 81, "usage_type": "call"}]} {"seq_id": "428243571", "text": "import plotly\nimport plotly.graph_objs as go\nimport json\n\n\ndef create_plot(meters, prediction):\n reading_0 = prediction[\"reading\"].loc[prediction[\"meter\"] == 0]\n reading_1 = prediction[\"reading\"].loc[prediction[\"meter\"] == 1]\n reading_2 = prediction[\"reading\"].loc[prediction[\"meter\"] == 2]\n reading_3 = prediction[\"reading\"].loc[prediction[\"meter\"] == 3]\n timestamp = prediction[\"timestamp\"]\n\n data = []\n\n if meters[0]:\n data.append(go.Scatter(x=timestamp, y=reading_0,\n mode='lines+markers',\n name='Electricity',\n line=dict(color='darkolivegreen'),\n showlegend=False))\n if meters[1]:\n data.append(go.Scatter(x=timestamp, y=reading_1,\n mode='lines+markers',\n name='Chilled Water',\n line=dict(color='aqua'),\n showlegend=False))\n if meters[2]:\n data.append(go.Scatter(x=timestamp, y=reading_2,\n mode='lines+markers',\n name='Steam',\n line=dict(color='aquamarine'),\n showlegend=False))\n if meters[3]:\n data.append(go.Scatter(x=timestamp, y=reading_3,\n mode='lines+markers',\n name='Hot Water',\n line=dict(color='darkturquoise'),\n showlegend=False))\n layout = {\n \"height\": 200,\n \"margin\": go.layout.Margin(\n t=20,\n b=30\n ),\n \"paper_bgcolor\": \"transparent\",\n \"yaxis\": {\n \"linecolor\": \"#001f07\",\n \"mirror\": True,\n \"title\": \"Energy Consumption\"\n },\n \"xaxis\": {\n \"linecolor\": \"#001f07\",\n \"mirror\": True,\n \"tickformat\": \"%H:00 - %b %d\",\n \"tickmode\": \"auto\",\n \"dtick\": \"H3\"\n }\n }\n return json.dumps({\"data\": data, \"layout\": layout}, cls=plotly.utils.PlotlyJSONEncoder)\n", "sub_path": "app/app/energApp/graph.py", "file_name": "graph.py", "file_ext": "py", "file_size_in_byte": 2171, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "84", "api": [{"api_name": "plotly.graph_objs.Scatter", "line_number": 16, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 16, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 22, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 22, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 28, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 28, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 34, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 34, "usage_type": "name"}, {"api_name": "plotly.graph_objs.layout.Margin", "line_number": 41, "usage_type": "call"}, {"api_name": "plotly.graph_objs.layout", "line_number": 41, "usage_type": "attribute"}, {"api_name": "plotly.graph_objs", "line_number": 41, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 59, "usage_type": "call"}, {"api_name": "plotly.utils", "line_number": 59, "usage_type": "attribute"}]} {"seq_id": "643206146", "text": "from rest_framework import permissions\n\n\nclass UpdateOwnProfile(permissions.BasePermission):\n \"\"\"allow user to edit own profile\"\"\"\n\n def has_object_permission(self, request, view, obj):\n \"\"\"check user is trying to edit profiel\"\"\"\n if request.method in permissions.SAFE_METHODS:\n return True\n\n return obj.id == request.user.id\n\n\nclass UpdateOwnStatus(permissions.BasePermission):\n \"\"\"allow usr to upadate their own status\"\"\"\n\n def has_object_permission(self, request, view, obj):\n \"\"\"used to check user is trying to upadte their own stsaus\"\"\"\n if request.method in permissions.SAFE_METHODS:\n return True\n return obj.user_profile.id == request.user.id\n \n", "sub_path": "profiles_api/permissions.py", "file_name": "permissions.py", "file_ext": "py", "file_size_in_byte": 739, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "rest_framework.permissions.BasePermission", "line_number": 4, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 4, "usage_type": "name"}, {"api_name": "rest_framework.permissions.SAFE_METHODS", "line_number": 9, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 9, "usage_type": "name"}, {"api_name": "rest_framework.permissions.BasePermission", "line_number": 15, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 15, "usage_type": "name"}, {"api_name": "rest_framework.permissions.SAFE_METHODS", "line_number": 20, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 20, "usage_type": "name"}]} {"seq_id": "490119811", "text": "import re\nimport pytest\nfrom click.testing import CliRunner\nfrom commands.validate import cli as validate\n\n\n@pytest.mark.validation\nclass TestValidate:\n\n @pytest.mark.datafiles('tests/neoload_projects/example_1/default.yaml')\n def test_success(self, datafiles):\n file_path = datafiles.listdir()[0]\n runner = CliRunner()\n result = runner.invoke(validate, [str(file_path), '--refresh'])\n assert 'Yaml file is valid' in str(result.output)\n assert result.exit_code == 0\n\n @pytest.mark.datafiles('tests/neoload_projects/example_1/default.yaml')\n def test_no_refresh(self, datafiles):\n file_path = datafiles.listdir()[0]\n runner = CliRunner()\n result = runner.invoke(validate, [str(file_path)])\n assert 'Yaml 
file is valid' in str(result.output)\n assert result.exit_code == 0\n\n @pytest.mark.datafiles('tests/neoload_projects/invalid_to_schema.yaml')\n def test_error(self, datafiles):\n file_path = datafiles.listdir()[0]\n runner = CliRunner()\n result = runner.invoke(validate, [str(file_path), '--refresh'])\n assert 'Wrong Yaml structure' in str(result.output)\n assert 'Additional properties are not allowed (\\'ifyourelookingforcutthroat\\' was unexpected)' in str(\n result.output)\n assert 'On instance:\\n{\\'name\\': \\'NeoLoad-CLI-example-2_0' in str(result.output)\n assert result.exit_code == 1\n\n @pytest.mark.datafiles('tests/neoload_projects/example_1/default.yaml')\n def test_bad_schema(self, datafiles):\n file_path = datafiles.listdir()[0]\n runner = CliRunner()\n result = runner.invoke(validate, [str(file_path), '--schema-url', 'https://www.neotys.com/', '--refresh'])\n assert 'Error: This is not a valid json schema file' in str(result.output)\n assert 'Expecting value: line 1 column 1' in str(result.output)\n assert result.exit_code == 1\n\n def test_no_argument(self):\n runner = CliRunner()\n result = runner.invoke(validate)\n assert re.compile(\".*Error: Missing argument [\\\"']FILE[\\\"'].*\", re.DOTALL).match(result.output) is not None\n assert result.exit_code == 2\n\n @pytest.mark.slow\n @pytest.mark.datafiles('tests/neoload_projects/example_1/default.yaml')\n def test_bad_schema_url(self, datafiles):\n file_path = datafiles.listdir()[0]\n runner = CliRunner()\n result = runner.invoke(validate, [str(file_path), '--schema-url', 'http://invalid.fr', '--refresh'])\n assert 'Max retries exceeded with url' in str(result.output)\n assert 'Failed to establish a new connection' in str(result.output)\n assert 'Error getting the schema from the url: http://invalid.fr' in str(result.output)\n assert result.exit_code == 1\n", "sub_path": "tests/commands/test_validate.py", "file_name": "test_validate.py", "file_ext": "py", "file_size_in_byte": 2793, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "click.testing.CliRunner", "line_number": 13, "usage_type": "call"}, {"api_name": "commands.validate.cli", "line_number": 14, "usage_type": "argument"}, {"api_name": "pytest.mark.datafiles", "line_number": 10, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 10, "usage_type": "attribute"}, {"api_name": "click.testing.CliRunner", "line_number": 21, "usage_type": "call"}, {"api_name": "commands.validate.cli", "line_number": 22, "usage_type": "argument"}, {"api_name": "pytest.mark.datafiles", "line_number": 18, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 18, "usage_type": "attribute"}, {"api_name": "click.testing.CliRunner", "line_number": 29, "usage_type": "call"}, {"api_name": "commands.validate.cli", "line_number": 30, "usage_type": "argument"}, {"api_name": "pytest.mark.datafiles", "line_number": 26, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 26, "usage_type": "attribute"}, {"api_name": "click.testing.CliRunner", "line_number": 40, "usage_type": "call"}, {"api_name": "commands.validate.cli", "line_number": 41, "usage_type": "argument"}, {"api_name": "pytest.mark.datafiles", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 37, "usage_type": "attribute"}, {"api_name": "click.testing.CliRunner", "line_number": 47, "usage_type": "call"}, {"api_name": "commands.validate.cli", "line_number": 48, "usage_type": "argument"}, 
{"api_name": "re.compile", "line_number": 49, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 49, "usage_type": "attribute"}, {"api_name": "click.testing.CliRunner", "line_number": 56, "usage_type": "call"}, {"api_name": "commands.validate.cli", "line_number": 57, "usage_type": "argument"}, {"api_name": "pytest.mark", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pytest.mark.datafiles", "line_number": 53, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 7, "usage_type": "attribute"}]} {"seq_id": "115613304", "text": "import unittest\nimport requests\nimport json\nimport mgr_functions\n\n\nclass TestQueryOrder(unittest.TestCase):\n def setUp(self):\n self.url = \"http://172.16.17.148:8080/hzhotel/mgrweb-api/order/queryOrder\"\n\n def testQueryOrder(self):\n token = mgr_functions.get_token()\n order_date = json.dumps({\"head\": {\"token\": token, \"channel\": \"\", \"clientId\": \"\"},\n \"currentPage\": 1, \"contactMobile\": \"\", \"orderNo\": \"\", \"pageSize\": 10})\n headers = {\n 'Content-Type': \"application/json\"\n }\n\n response = requests.request('POST', self.url, data=order_date, headers=headers)\n self.result = response.json()\n self.head = self.result['head']\n print(self.result['msg'])\n self.assertEqual(0, self.result['code'])\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "interface/hzhotel_mgrweb-api/test_b_queryOrder.py", "file_name": "test_b_queryOrder.py", "file_ext": "py", "file_size_in_byte": 854, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "unittest.TestCase", "line_number": 7, "usage_type": "attribute"}, {"api_name": "mgr_functions.get_token", "line_number": 12, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 13, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 19, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 27, "usage_type": "call"}]} {"seq_id": "510985829", "text": "# TestGame.py\n#\n# Copyright (C) 2016 Abhijit Patel Here\n#\n# This program is free software; you can redistribute it\n# and/or modify it under the terms of the GNU General\n# Public License as published by the Free Software\n# Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will\n# be useful, but WITHOUT ANY WARRANTY; without even\n# the implied warranty of MERCHANTABILITY or FITNESS FOR\n# A PARTICULAR PURPOSE. 
See the GNU General Public\n# License for more details.\n#\n# You should have received a copy of the GNU General\n# Public License along with this program; if not, write\n# to the Free Software Foundation, Inc., 51 Franklin\n# St, Fifth Floor, Boston, MA 02110-1301 USA\n#wormgame\nimport random, pygame, sys\nfrom pygame.locals import *\nfrom gi.repository import Gtk\nFPS = 15\nWINDOWWIDTH = 1000\nWINDOWHEIGHT = 680\nCELLSIZE = 20\nassert WINDOWWIDTH % CELLSIZE == 0, \"Window width must be a multiple of cell size.\"\nassert WINDOWHEIGHT % CELLSIZE == 0, \"Window height must be a multiple of cell size.\"\nCELLWIDTH = int(WINDOWWIDTH / CELLSIZE)\nCELLHEIGHT = int(WINDOWHEIGHT / CELLSIZE)\n\n \nWHITE = (255, 255, 255)\nBLACK = ( 0, 0, 0)\nRED = (255, 0, 0)\nGREEN = ( 0, 255, 0)\nDARKGREEN = ( 0, 155, 0)\nDARKGRAY = ( 40, 40, 40)\nBGCOLOR = BLACK\nBROWN = (200, 128, 0)\nBLUE = (0, 0, 255)\n \nUP = 'up'\nDOWN = 'down'\nLEFT = 'left'\nRIGHT = 'right'\nUP = 'up'\nDOWN = 'down'\nLEFT = 'left'\nRIGHT = 'right'\nFPSCLOCK= None \nDISPLAYSURF=None\nBASICFONT=None\nHEAD = 0 \nHEAD = 0 \n\ndef main():\n \n\n pygame.init()\n \n \n game = wormgame()\n \n \n game.run()\n \n\nclass wormgame:\n def __init__(self):\n pass\n def run(self):\n global FPSCLOCK, DISPLAYSURF, BASICFONT, WINDOWWIDTH, WINDOWHEIGHT\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n BASICFONT = pygame.font.Font('freesansbold.ttf', 18)\n pygame.display.set_caption('Wormy')\n FPSCLOCK = pygame.time.Clock()\n showStartScreen()\n startx = random.randint(2, CELLWIDTH - 6)\n starty = random.randint(1, 4)\n wormCoords = [{'x': startx, 'y': starty},\n {'x': startx - 1, 'y': starty},\n {'x': startx - 2, 'y': starty}]\n direction = RIGHT\n #showStartScreen()\n # Start the apple in a random place.\n apple = getRandomLocation()\n\n while True: # main game loop\n while Gtk.events_pending():\n Gtk.main_iteration()\n for event in pygame.event.get(): # event handling loop\n if event.type == QUIT:\n terminate()\n elif event.type == pygame.KEYDOWN:\n \n if (event.key == K_LEFT or event.key == K_a) and direction != RIGHT:\n direction = LEFT\n elif (event.key == K_RIGHT or event.key == K_d) and direction != LEFT:\n direction = RIGHT\n elif (event.key == K_UP or event.key == K_w) and direction != DOWN:\n direction = UP\n elif (event.key == K_DOWN or event.key == K_s) and direction != UP:\n direction = DOWN\n elif event.key == K_ESCAPE:\n terminate()\n\n\n if wormCoords[HEAD]['x'] in range(12,32) and wormCoords[HEAD]['y'] == 22:\n showGameOverScreen() # game over\n is_true = True\n while is_true:\n while Gtk.events_pending():\n Gtk.main_iteration()\n \n for event in pygame.event.get():\n \n if event.type == pygame.KEYDOWN:\n \n startx = random.randint(5, CELLWIDTH - 10)\n starty = random.randint(29, CELLHEIGHT - 2)\n wormCoords = [{'x': startx, 'y': starty},\n {'x': startx - 1, 'y': starty},\n {'x': startx - 2, 'y': starty}]\n direction = RIGHT\n apple = getRandomLocation()\n is_true = False\n\n elif wormCoords[HEAD]['x'] in range(18,38) and wormCoords[HEAD]['y'] == 12:\n showGameOverScreen() # game over\n is_true = True\n while is_true:\n while Gtk.events_pending():\n Gtk.main_iteration()\n \n for event in pygame.event.get():\n \n if event.type == pygame.KEYDOWN:\n \n \n startx = random.randint(5, CELLWIDTH - 10)\n starty = random.randint(29, CELLHEIGHT - 3)\n wormCoords = [{'x': startx, 'y': starty},\n {'x': startx - 1, 'y': starty},\n {'x': startx - 2, 'y': starty}]\n direction = RIGHT\n apple = getRandomLocation()\n is_true = False\n 
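# The elif branches below repeat the same wall-collision handling for the remaining wall segments drawn by drawwalls(): show the game-over screen, then respawn the worm and the apple once a key is pressed.\n 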
elif wormCoords[HEAD]['x'] in range(12,44) and wormCoords[HEAD]['y'] == 6:\n showGameOverScreen() # game over\n is_true = True\n while is_true:\n while Gtk.events_pending():\n Gtk.main_iteration()\n \n for event in pygame.event.get():\n \n if event.type == pygame.KEYDOWN:\n \n \n startx = random.randint(5, CELLWIDTH - 5)\n starty = random.randint(29, CELLHEIGHT - 2)\n wormCoords = [{'x': startx, 'y': starty},\n {'x': startx - 1, 'y': starty},\n {'x': startx - 2, 'y': starty}]\n direction = RIGHT\n apple = getRandomLocation()\n is_true = False\n elif wormCoords[HEAD]['x'] in range(6,44) and wormCoords[HEAD]['y'] == 28:\n showGameOverScreen() # game over\n is_true = True\n while is_true:\n while Gtk.events_pending():\n Gtk.main_iteration()\n \n for event in pygame.event.get():\n \n if event.type == pygame.KEYDOWN:\n \n \n startx = random.randint(5, CELLWIDTH - 10)\n starty = random.randint(29, CELLHEIGHT - 3)\n wormCoords = [{'x': startx, 'y': starty},\n {'x': startx - 1, 'y': starty},\n {'x': startx - 2, 'y': starty}]\n direction = RIGHT\n apple = getRandomLocation()\n is_true = False\n \n elif wormCoords[HEAD]['x'] == 6 and wormCoords[HEAD]['y'] in range(6,28):\n showGameOverScreen() # game over\n is_true = True\n while is_true:\n while Gtk.events_pending():\n Gtk.main_iteration()\n for event in pygame.event.get():\n \n if event.type == pygame.KEYDOWN:\n \n \n startx = random.randint(5, CELLWIDTH - 10)\n starty = random.randint(29, CELLHEIGHT - 3)\n wormCoords = [{'x': startx, 'y': starty},\n {'x': startx - 1, 'y': starty},\n {'x': startx - 2, 'y': starty}]\n direction = RIGHT\n apple = getRandomLocation()\n is_true = False \n elif wormCoords[HEAD]['x'] == 12 and wormCoords[HEAD]['y'] in range(12,22):\n showGameOverScreen() # game over\n is_true = True\n while is_true:\n while Gtk.events_pending():\n Gtk.main_iteration()\n for event in pygame.event.get():\n \n if event.type == pygame.KEYDOWN:\n \n startx = random.randint(3, CELLWIDTH - 5)\n starty = random.randint(1, 5)\n wormCoords = [{'x': startx, 'y': starty},\n {'x': startx - 1, 'y': starty},\n {'x': startx - 2, 'y': starty}]\n direction = RIGHT\n apple = getRandomLocation()\n is_true = False\n elif wormCoords[HEAD]['x'] == 38 and wormCoords[HEAD]['y'] in range(12,22):\n showGameOverScreen() # game over\n \n is_true = True\n while is_true:\n while Gtk.events_pending():\n Gtk.main_iteration()\n for event in pygame.event.get():\n \n if event.type == pygame.KEYDOWN:\n \n startx = random.randint(2, 7)\n starty = random.randint(1, 5)\n wormCoords = [{'x': startx, 'y': starty},\n {'x': startx - 1, 'y': starty},\n {'x': startx - 2, 'y': starty}]\n direction = RIGHT\n apple = getRandomLocation()\n is_true = False\n\n elif wormCoords[HEAD]['x'] == 44 and wormCoords[HEAD]['y'] in range(6,22):\n showGameOverScreen() # game over\n \n is_true = True\n while is_true:\n while Gtk.events_pending():\n Gtk.main_iteration()\n for event in pygame.event.get():\n \n if event.type == pygame.KEYDOWN:\n \n startx = random.randint(3, CELLWIDTH - 5)\n starty = random.randint(29, CELLHEIGHT - 2)\n wormCoords = [{'x': startx, 'y': starty},\n {'x': startx - 1, 'y': starty},\n {'x': startx - 2, 'y': starty}]\n direction = RIGHT\n apple = getRandomLocation()\n is_true = False \n for wormBody in wormCoords[1:]:\n if wormBody['x'] == wormCoords[HEAD]['x'] and wormBody['y'] == wormCoords[HEAD]['y']:\n showGameOverScreen() # game over\n is_true = True\n while is_true:\n while Gtk.events_pending():\n Gtk.main_iteration() \n for event in pygame.event.get():\n \n if 
event.type == pygame.KEYDOWN:\n \n startx = random.randint(5, CELLWIDTH - 10)\n starty = random.randint(29, CELLHEIGHT - 2)\n wormCoords = [{'x': startx, 'y': starty},\n {'x': startx - 1, 'y': starty},\n {'x': startx - 2, 'y': starty}]\n direction = RIGHT\n apple = getRandomLocation()\n is_true = False\n # check if worm has eaten an apply\n if wormCoords[HEAD]['x'] == apple['x'] and wormCoords[HEAD]['y'] == apple['y']:\n # don't remove worm's tail segment\n apple = getRandomLocation() # set a new apple somewhere\n else:\n del wormCoords[-1] # remove worm's tail segment\n \n\n # move the worm by adding a segment in the direction it is moving\n if wormCoords[HEAD]['x'] == CELLWIDTH:\n wormCoords[HEAD]['x'] = 0\n\n if wormCoords[HEAD]['x'] == -1:\n wormCoords[HEAD]['x'] = CELLWIDTH\n\n if wormCoords[HEAD]['y'] == CELLHEIGHT:\n wormCoords[HEAD]['y'] = 0\n\n if wormCoords[HEAD]['y'] == -1:\n wormCoords[HEAD]['y'] = CELLHEIGHT \n \n if direction == UP:\n newHead = {'x': wormCoords[HEAD]['x'], 'y': wormCoords[HEAD]['y'] - 1}\n elif direction == DOWN:\n newHead = {'x': wormCoords[HEAD]['x'], 'y': wormCoords[HEAD]['y'] + 1}\n elif direction == LEFT:\n newHead = {'x': wormCoords[HEAD]['x'] - 1, 'y': wormCoords[HEAD]['y']}\n elif direction == RIGHT:\n newHead = {'x': wormCoords[HEAD]['x'] + 1, 'y': wormCoords[HEAD]['y']}\n wormCoords.insert(0, newHead)\n DISPLAYSURF.fill(WHITE)\n \n drawWorm(wormCoords)\n drawApple(apple)\n drawwalls()\n drawScore(len(wormCoords) - 3)\n pygame.display.update()\n FPSCLOCK.tick(FPS)\n\ndef drawPressKeyMsg():\n pressKeySurf = BASICFONT.render('Press a key to play.', True, BLACK)\n pressKeyRect = pressKeySurf.get_rect()\n pressKeyRect.topleft = (WINDOWWIDTH - 200, WINDOWHEIGHT - 30)\n DISPLAYSURF.blit(pressKeySurf, pressKeyRect)\n\n\ndef checkForKeyPress():\n \n for event1 in pygame.event.get():\n \n if event1.type == pygame.KEYUP:\n \n return True\ndef showGameOverScreen():\n gameOverFont = pygame.font.Font('freesansbold.ttf', 150)\n gameSurf = gameOverFont.render('Game', True, BLACK)\n overSurf = gameOverFont.render('Over', True, BLACK)\n gameRect = gameSurf.get_rect()\n overRect = overSurf.get_rect()\n gameRect.midtop = (WINDOWWIDTH / 2, 10)\n overRect.midtop = (WINDOWWIDTH / 2, gameRect.height + 10 + 25)\n\n DISPLAYSURF.blit(gameSurf, gameRect)\n DISPLAYSURF.blit(overSurf, overRect)\n drawPressKeyMsg()\n pygame.display.update()\n pygame.time.wait(500)\n return\n \n \ndef showStartScreen():\n snakefont = pygame.font.Font('freesansbold.ttf', 100)\n snakesurf1 = snakefont.render('Wormy', True, BLUE)\n snakesurf2 = snakefont.render('Game!', True, BLUE)\n movx = WINDOWWIDTH / 2\n movy = 50 \n DISPLAYSURF.fill(WHITE)\n snakerect1 = snakesurf1.get_rect()\n snakerect1.topright = (movx,movy)\n DISPLAYSURF.blit(snakesurf1,snakerect1)\n snakerect2 = snakesurf2.get_rect()\n snakerect2.topright = (movx,movy+100)\n DISPLAYSURF.blit(snakesurf2,snakerect2)\n\n drawPressKeyMsg() \n movx += 10\n movy = 50\n \n pygame.display.update()\n FPSCLOCK.tick(10)\n si_true = True\n while si_true:\n while Gtk.events_pending():\n Gtk.main_iteration()\n for event in pygame.event.get():\n \n if event.type == pygame.KEYUP:\n si_true = False\n return \n\ndef terminate():\n pygame.quit()\n sys.exit()\n\n\ndef getRandomLocation():\n p = random.randint(0, CELLWIDTH - 1)\n q = random.randint(0, CELLHEIGHT - 1)\n if p in range(12,32) and q == 22:\n p = 5\n q = random.randint(0, CELLHEIGHT - 1)\n if p in range(18,38) and q == 12:\n p = 2\n q = random.randint(0, CELLHEIGHT - 1)\n if p in range(12,44) and q 
== 6:\n p = 25\n q = random.randint(13, 21)\n if p in range(6,44) and q == 28:\n p = 27\n q = random.randint(13, 19)\n elif p == 6 and q in range(6,28):\n p = 46\n q = random.randint(0, CELLHEIGHT - 1)\n elif p == 12 and q in range(12,22):\n p = 47\n q = random.randint(0, CELLHEIGHT - 1)\n elif p == 38 and q in range(12,22):\n p = 48\n q = random.randint(0, CELLHEIGHT - 1)\n elif p == 44 and q in range(6,22):\n p = 22\n q = random.randint(14, 20)\n else:\n p=p\n q=q\n return {'x': p, 'y': q}\n\n\n\ndef drawScore(score):\n scoreSurf = BASICFONT.render('Score: %s' % (score), True, BLACK)\n scoreRect = scoreSurf.get_rect()\n scoreRect.topleft = (WINDOWWIDTH - 120, 10)\n DISPLAYSURF.blit(scoreSurf, scoreRect)\n\ndef drawwalls():\n wallsrect = pygame.Rect(120, 120, CELLSIZE, 440)\n pygame.draw.rect(DISPLAYSURF, BROWN, wallsrect)\n\n wallsrecta = pygame.Rect(240, 240, CELLSIZE, 200)\n pygame.draw.rect(DISPLAYSURF, BROWN, wallsrecta)\n\n wallsrectb = pygame.Rect(120, 560, 760, CELLSIZE)\n pygame.draw.rect(DISPLAYSURF, BROWN, wallsrectb) \n\n wallsrectc = pygame.Rect(240, 440, 400, CELLSIZE)\n pygame.draw.rect(DISPLAYSURF, BROWN, wallsrectc)\n\n wallsrectc = pygame.Rect(240, 120, 640, CELLSIZE)\n pygame.draw.rect(DISPLAYSURF, BROWN, wallsrectc)\n \n wallsrectc = pygame.Rect(880, 120, CELLSIZE, 320)\n pygame.draw.rect(DISPLAYSURF, BROWN, wallsrectc)\n\n wallsrectc = pygame.Rect(360, 240, 400, CELLSIZE)\n pygame.draw.rect(DISPLAYSURF, BROWN, wallsrectc)\n\n wallsrectc = pygame.Rect(760, 240, CELLSIZE, 200)\n pygame.draw.rect(DISPLAYSURF, BROWN, wallsrectc)\n \ndef drawWorm(wormCoords):\n for coord in wormCoords:\n x = coord['x'] * CELLSIZE\n y = coord['y'] * CELLSIZE\n wormSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)\n pygame.draw.rect(DISPLAYSURF, GREEN, wormSegmentRect)\n wormInnerSegmentRect = pygame.Rect(x + 3, y + 3, CELLSIZE -6 , CELLSIZE - 6)\n pygame.draw.rect(DISPLAYSURF, BLUE, wormInnerSegmentRect)\n\n\ndef drawApple(apple):\n x = apple['x'] * CELLSIZE\n y = apple['y'] * CELLSIZE\n appleRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)\n pygame.draw.rect(DISPLAYSURF, RED, appleRect)\n\n\n\nif __name__ == '__main__':\n main()\n\n\n", "sub_path": "test/TestGame.py", "file_name": "TestGame.py", "file_ext": "py", "file_size_in_byte": 18620, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pygame.init", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 78, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 79, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 81, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 82, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.events_pending", "line_number": 92, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 92, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main_iteration", "line_number": 93, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 93, "usage_type": 
"name"}, {"api_name": "pygame.event.get", "line_number": 94, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 97, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.events_pending", "line_number": 115, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 115, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main_iteration", "line_number": 116, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 116, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 118, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 120, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 122, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 123, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.events_pending", "line_number": 135, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 135, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main_iteration", "line_number": 136, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 136, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 138, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 140, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 143, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 144, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.events_pending", "line_number": 155, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 155, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main_iteration", "line_number": 156, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 156, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 158, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 158, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 160, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 163, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 164, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.events_pending", "line_number": 175, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 175, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main_iteration", "line_number": 176, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 176, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 178, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 178, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 180, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 183, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 184, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.events_pending", "line_number": 196, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 196, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main_iteration", "line_number": 197, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 197, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 198, "usage_type": "call"}, {"api_name": "pygame.event", 
"line_number": 198, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 200, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 203, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 204, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.events_pending", "line_number": 215, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 215, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main_iteration", "line_number": 216, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 216, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 217, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 217, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 219, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 221, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 222, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.events_pending", "line_number": 234, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 234, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main_iteration", "line_number": 235, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 235, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 236, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 236, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 238, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 240, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 241, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.events_pending", "line_number": 254, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 254, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main_iteration", "line_number": 255, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 255, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 256, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 256, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 258, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 260, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 261, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.events_pending", "line_number": 273, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 273, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main_iteration", "line_number": 274, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 274, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 275, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 275, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 277, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 279, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 280, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 323, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 323, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 335, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 335, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 337, "usage_type": "attribute"}, {"api_name": 
"pygame.font.Font", "line_number": 341, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 341, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 352, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 352, "usage_type": "attribute"}, {"api_name": "pygame.time.wait", "line_number": 353, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 353, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 358, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 358, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 375, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 375, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.events_pending", "line_number": 379, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 379, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main_iteration", "line_number": 380, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 380, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 381, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 381, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 383, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 388, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 389, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 393, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 394, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 397, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 400, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 403, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 406, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 409, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 412, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 415, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 418, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 433, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 434, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 434, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 436, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 437, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 437, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 439, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 440, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 440, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 442, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 443, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 443, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 445, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 446, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 446, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 448, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 449, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 449, "usage_type": "attribute"}, 
{"api_name": "pygame.Rect", "line_number": 451, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 452, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 452, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 454, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 455, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 455, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 461, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 462, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 462, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 463, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 464, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 464, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 470, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 471, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 471, "usage_type": "attribute"}]} {"seq_id": "650032808", "text": "from tkinter import *\nfrom tkinter import ttk\nfrom tkinter.ttk import Frame, Button\nfrom PIL import Image as PilImage\nfrom PIL import ImageTk\nfrom docxtpl import DocxTemplate\nfrom tkinter import messagebox as mb\nfrom tkinter.scrolledtext import ScrolledText\nimport docx as dc\n\nfrom collections import Counter\nfrom openpyxl import load_workbook\nimport re\n\n\nclass Window4:\n def __init__(self, parent, width, height, title=\"ะ“ะตะฝะตั€ะฐั‚ะพั€ ะ ะฐะฑะพั‡ะตะน ะฟั€ะพะณั€ะฐะผะผั‹ ะดะธัั†ะธะฟะปะธะฝั‹\", resizable=(False, False),\n icon=r\"main.ico\"):\n self.root = Toplevel(parent)\n self.root.title(title)\n self.root.geometry(f\"{width}x{height}+200+200\")\n self.root.resizable(resizable[0], resizable[1])\n self.root.config(bg=\"#1f4b99\")\n\n img = PilImage.open('images/add_png.png')\n img = img.resize((18, 18), PilImage.ANTIALIAS)\n self.photo_image = ImageTk.PhotoImage(img)\n\n img3 = PilImage.open('images/help_png.png')\n img3 = img3.resize((18, 18), PilImage.ANTIALIAS)\n self.photo_image3 = ImageTk.PhotoImage(img3)\n\n self.root.iconbitmap('images/osu2.ico')\n\n self.frm_form = LabelFrame(self.root, relief=SUNKEN, borderwidth=3, text=\"ะŸะปะฐะฝะธั€ัƒะตะผั‹ะต ั€ะตะทัƒะปัŒั‚ะฐั‚ั‹ ะพะฑัƒั‡ะตะฝะธั\")\n self.frm_form.pack(fill=X, ipadx=30, ipady=30, padx=10)\n\n self.label_text1 = Label(self.frm_form,\n text=\n \"ะ”ะพะฑะฐะฒัŒั‚ะต ะฝะตัะบะพะปัŒะบะพ ะบะพะผะฟะตั‚ะตะฝั†ะธะน ั ะฟะพะผะพั‰ัŒัŽ ะบะฝะพะฟะบะธ 'ะ”ะพะฑะฐะฒะธั‚ัŒ'\"\n \" ะธ ัะพั…ั€ะฐะฝะธั‚ะต ะธะทะผะตะฝะตะฝะธั\").pack()\n self.label_text = Label(self.frm_form, text=\"ะšะพะด ะบะพะผะฟะตั‚ะตะฝั†ะธะธ\").pack()\n\n self.combobox = ttk.Combobox(self.frm_form, values=[\n 'ะฃะš-1',\n 'ะฃะš-2',\n 'ะฃะš-3',\n 'ะฃะš-4',\n 'ะฃะš-5',\n 'ะฃะš-1',\n 'ะฃะš-6','ะฃะš-7','ะฃะš-8','ะฃะš-9','ะžะŸะš-1','ะžะŸะš-2','ะžะŸะš-3','ะžะŸะš-4','ะŸะš-1','ะŸะš-2','ะŸะš-3','ะŸะš-4','ะŸะš-5','ะŸะš-6','ะŸะš-7','ะŸะš-8',\n 'ะŸะš-9','ะŸะš-10','ะŸะš-11','ะŸะš-12','ะŸะš-13','ะŸะš-14',\n 'ะŸะš-15','ะŸะš-16','ะŸะš-20','ะŸะš-21','ะŸะš-22','ะŸะš-23','ะŸะš-24'])\n self.combobox.pack()\n\n self.frm_buttons = Frame(self.root)\n self.frm_buttons.pack(fill=X, ipadx=5, ipady=5)\n\n self.btn_submit = Button(self.frm_buttons, text=\"ะ”ะพะฑะฐะฒะธั‚ัŒ\", image=self.photo_image, compound=LEFT,\n command=self.add_zuv)\n self.btn_submit.pack(side=LEFT, padx=10, ipadx=10)\n\n self.btn_clear = Button(self.frm_buttons, text=\"ะกะพั…ั€ะฐะฝะธั‚ัŒ\", 
command=self.save_zuv)\n self.btn_clear.pack(side=LEFT, padx=10, ipadx=10)\n\n self.btn_help = Button(self.frm_buttons, image=self.photo_image3)\n self.btn_help.pack(side=LEFT, padx=10, ipadx=10)\n\n self.f_top = Frame(self.frm_form) # frame for the text widgets\n self.f_mid = Frame(self.frm_form)\n self.f_bot = Frame(self.frm_form)\n\n self.text_edit = ScrolledText(self.f_top, width=5, height=1, font=(\"Times New Roman\", 11), wrap=WORD)\n self.label_text = Label(self.frm_form, text=\"ะžะฟะธัะฐะฝะธะต ะบะพะผะฟะตั‚ะตะฝั†ะธะธ\").pack()\n self.text_edit2 = ScrolledText(self.f_top, width=45, height=10, font=(\"Times New Roman\", 11), wrap=WORD)\n self.text_edit3 = ScrolledText(self.f_mid, width=5, height=1, font=(\"Times New Roman\", 11), wrap=WORD)\n self.text_edit4 = ScrolledText(self.f_mid, width=45, height=10, font=(\"Times New Roman\", 11), wrap=WORD)\n\n self.f_top.pack()\n self.f_mid.pack()\n\n self.text_edit.pack(side=LEFT)\n self.text_edit2.pack(side=LEFT)\n self.text_edit3.pack(side=LEFT)\n self.text_edit4.pack(side=LEFT)\n\n self.filename1 = \"data/ะšะพะผะฟะตั‚ะตะฝั†ะธะธ.xlsx\"\n\n self.spisok_kod = []\n self.description_zuv = []\n zuv = []\n\n self.add_kod_zuv_spisok = []\n self.add_zuv_spisok = []\n\n self.spisok_z = []\n self.spisok_u = []\n self.spisok_v = []\n\n self.zuv1 = []\n self.zuv2 = []\n self.zuv3 = []\n\n self.wb = load_workbook(self.filename1)\n self.sheet = self.wb['ะ›ะธัั‚1']\n self.parcing()\n\n def parcing(self):\n for row in self.sheet['A1':'A34']:\n string = ''\n for cell in row:\n string = string + str(cell.value)\n your_string = string\n self.spisok_kod.append(your_string)\n\n for row in self.sheet['B1':'B34']:\n string = ''\n for cell in row:\n string = string + str(cell.value)\n your_string = string\n self.description_zuv.append(your_string)\n\n for row in self.sheet['C1':'C5']:\n string = ''\n for cell in row:\n string = string + str(cell.value)\n your_string = string\n self.zuv1.append(your_string)\n\n for row in self.sheet['D1':'D5']:\n string = ''\n for cell in row:\n string = string + str(cell.value)\n your_string = string\n self.zuv2.append(your_string)\n\n for row in self.sheet['E1':'E5']:\n string = ''\n for cell in row:\n string = string + str(cell.value)\n your_string = string\n self.zuv3.append(your_string)\n\n # self.text_edit.insert(1.0, self.description_zuv)\n # print(self.description_zuv[0])\n\n def add_zuv(self):\n self.text_edit.delete(1.0, END)\n self.text_edit2.delete(1.0, END)\n\n self.text_edit.insert(1.0, self.combobox.get())\n self.text_edit2.insert(1.0, self.description_zuv[self.combobox.current()])\n self.text_edit3.insert(2.0, self.combobox.get())\n self.text_edit4.insert(2.0, self.description_zuv[self.combobox.current()] + '\\n\\n')\n\n self.add_kod_zuv_spisok.append(self.combobox.get())\n self.add_zuv_spisok.append(self.description_zuv[self.combobox.current()])\n self.spisok_z.append(self.zuv1[self.combobox.current()])\n self.spisok_u.append(self.zuv2[self.combobox.current()])\n self.spisok_v.append(self.zuv3[self.combobox.current()])\n def save_zuv(self):\n doc = DocxTemplate(\"data/RPD3.docx\")\n\n context = {'description1': self.add_zuv_spisok[0], 'kod_komp1': self.add_kod_zuv_spisok[0],\n 'description2': self.add_zuv_spisok[1], 'kod_komp2': self.add_kod_zuv_spisok[1],\n 'zuv1_1': self.spisok_z[0], 'zuv1_2': self.spisok_u[0], 'zuv1_3': self.spisok_v[0],\n 'zuv2_1': self.spisok_z[1], 'zuv2_2': self.spisok_u[1], 'zuv2_3': self.spisok_v[1]\n }\n doc.render(context)\n doc.save(\"ะณะพั‚ะพะฒั‹ะต ะฟั€ะพะณั€ะฐะผะผั‹/ั€ะฐะฑะพั‡ะฐั 
ะฟั€ะพะณั€ะฐะผะผะฐ.docx\")\n mb.showinfo(\"ะ’ะฝะธะผะฐะฝะธะต\", \"ะŸะปะฐะฝะธั€ัƒะตะผั‹ะต ั€ะตะทัƒะปัŒั‚ะฐั‚ั‹ ะพะฑัƒั‡ะตะฝะธั ัั„ะพั€ะผะธั€ะพะฒะฐะฝั‹\")", "sub_path": "window4.py", "file_name": "window4.py", "file_ext": "py", "file_size_in_byte": 7078, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "PIL.Image.open", "line_number": 25, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 25, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 26, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 26, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 27, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 29, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 29, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 30, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 30, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 31, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 31, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 44, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 44, "usage_type": "name"}, {"api_name": "tkinter.ttk.Frame", "line_number": 56, "usage_type": "call"}, {"api_name": "tkinter.ttk.Button", "line_number": 59, "usage_type": "call"}, {"api_name": "tkinter.ttk.Button", "line_number": 63, "usage_type": "call"}, {"api_name": "tkinter.ttk.Button", "line_number": 66, "usage_type": "call"}, {"api_name": "tkinter.ttk.Frame", "line_number": 69, "usage_type": "call"}, {"api_name": "tkinter.ttk.Frame", "line_number": 70, "usage_type": "call"}, {"api_name": "tkinter.ttk.Frame", "line_number": 71, "usage_type": "call"}, {"api_name": "tkinter.scrolledtext.ScrolledText", "line_number": 73, "usage_type": "call"}, {"api_name": "tkinter.scrolledtext.ScrolledText", "line_number": 75, "usage_type": "call"}, {"api_name": "tkinter.scrolledtext.ScrolledText", "line_number": 76, "usage_type": "call"}, {"api_name": "tkinter.scrolledtext.ScrolledText", "line_number": 77, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 104, "usage_type": "call"}, {"api_name": "docxtpl.DocxTemplate", "line_number": 162, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 171, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 171, "usage_type": "name"}]} {"seq_id": "26492690", "text": "\"\"\"\nไฝฟ็”จnumpyๅฎž็Žฐๅท็งฏๆ ธ็š„่ฟ็ฎ—\n\"\"\"\nimport numpy as np\nimport cv2\n\ndef concolve(datMat, kernel):\n \"\"\"\n :param datMat: ่ฆๅค„็†็š„ๆ•ฐๆฎ\n :param kernel: ๅท็งฏๆ ธ\n :return:ๅค„็†ๅฎŒ็š„ๆ•ฐๆฎ\n \"\"\"\n # ่Žทๅ–ๅฝข็Šถ\n m = datMat.shape[0]\n n = datMat.shape[1]\n km = kernel.shape[0]\n kn = kernel.shape[1]\n # ๅˆ›ๅปบๅ…จโ€œ1โ€ๆ•ฐ็ป„\n newMat = np.ones((m, n), dtype=np.uint8)\n tempMat = np.ones((km, kn), dtype=np.uint8)\n\n for row in range(m - km + 1):\n for col in range(n - kn + 1):\n for m_k in range(km):\n for n_k in range(kn):\n tempMat[m_k, n_k] = datMat[(row + m_k), (col + n_k)] * kernel[m_k, n_k]\n newMat[row, col] = np.sum(tempMat)\n\n return newMat\n\n\nCore = np.array(((-0.1, 2, 0.1),\n (-0.2, 0, 0.2),\n (1, -2, -1)), dtype=np.uint8)\nimgCore = np.array(((-0.1, 2, 0.1, 0),\n (-0.2, 0, 0.2, 0),\n (1, -2, -1, 0),\n (0, 0, 0, 0)), 
dtype=np.uint8)\n\nimg = cv2.imread('./picture.jpg')\nimg = cv2.resize(img, (640, 480))\n# cv2.imshow('Demo_0', img)\nimg_nov = cv2.filter2D(img, -1, Core)\ncv2.imshow('Demo_1', img_nov)\nimg_nov = concolve(imgCore, Core)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n", "sub_path": "ๅท็งฏๆ ธ.py", "file_name": "ๅท็งฏๆ ธ.py", "file_ext": "py", "file_size_in_byte": 1278, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "numpy.ones", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 48, "usage_type": "call"}]} {"seq_id": "154318471", "text": "\"\"\"Verify that a climate file is well formed.\"\"\"\nimport sys\nimport datetime\n\n\ndef main(argv):\n \"\"\"Run for a given file.\"\"\"\n fn = argv[1]\n lines = open(fn).readlines()\n tokens = lines[4].strip().split()\n syear = int(tokens[4])\n # simyears = int(tokens[5])\n linenum = 15\n yesterday = datetime.date(syear - 1, 12, 31)\n while linenum < len(lines):\n tokens = lines[linenum].split()\n if len(tokens) < 4:\n print(\"linenum: %s has len(tokens): %s\" % (linenum, len(tokens)))\n sys.exit()\n thisdate = datetime.date(\n int(tokens[2]), int(tokens[1]), int(tokens[0])\n )\n if (thisdate - yesterday) != datetime.timedelta(days=1):\n print(\n \"linenum: %s has date: %s, not %s\"\n % (linenum, thisdate, yesterday + datetime.timedelta(days=1))\n )\n sys.exit()\n yesterday = thisdate\n lastprecip = -1\n for _ in range(int(tokens[3])):\n linenum += 1\n tokens = lines[linenum].split()\n if len(tokens) != 2:\n print(\"linenum: %s has bad token count\" % (linenum,))\n sys.exit()\n tm = float(tokens[0])\n if tm < 0 or tm >= 24:\n print(\"linenum: %s has bad time: %s\" % (linenum, tokens[0]))\n sys.exit()\n precip = float(tokens[1])\n if precip < 0 or precip >= 350:\n print(\"linenum: %s has bad precip: %s\" % (linenum, tokens[1]))\n sys.exit()\n if precip <= lastprecip:\n print(\n \"linenum: %s has decreasing precip: %s, last %s\"\n % (linenum, tokens[1], lastprecip)\n )\n sys.exit()\n lastprecip = precip\n linenum += 1\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "sub_path": "scripts/cligen/verify_clifile.py", "file_name": "verify_clifile.py", "file_ext": "py", "file_size_in_byte": 1876, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "datetime.date", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 20, "usage_type": "call"}, {"api_name": 
"datetime.timedelta", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 36, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 44, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 56, "usage_type": "attribute"}]} {"seq_id": "517815210", "text": "from qiskit import QuantumCircuit,QuantumRegister,ClassicalRegister\nfrom qiskit import Aer,IBMQ\nfrom qiskit import execute\nfrom qiskit.tools import visualization\nfrom qiskit.tools.visualization import circuit_drawer, plot_histogram\nimport matplotlib.pyplot as plt\nfrom executeCircuit import execute_locally,extractClassical\nimport random as r\nfrom qAlgorithms import QMF\nimport math\nimport random \nimport numpy as np\nimport sys \nfrom qiskit.aqua.components.initial_states import Custom\n\n#######################################################################\n############ QMF TEST FOR A UNIFORM SUPERPOSITION STATE ###############\n########################## FUNCTIONAL #################################\n#######################################################################\n\nn = 4 # quantum state will have 2**n superposition states \nsuperpositions = 2**n\nmaximum_expected = superpositions-1\n\n#IN THIS CASE THE MAXIMUM WILL BE |1111> = 15 \n\nqstate = QuantumRegister(n,name=\"qstate\") #quantum register that contains the superposition\nnew_qr = QuantumRegister(n,name=\"new_qr\") #qantum register for qmf measuring test \nqc = QuantumCircuit(qstate,new_qr)\n\nqc.h(qstate)\nqc.h(new_qr) #new_qr will be measured as |0001> = 1\n\n'''\n\tINIT PARAMS FOR THE QUANTUM MAXIMUM FINDING ALG \n\n__init__(self, circuit=None,search_register=None,num_iterations=None, max_index=None, extra_registers=None,size=None):\n\n\n\n#############################################################################################\n\tIF WITH A UNIFORM SUPERPOSITION STATE LIKE qstate WE CHOOSE INITIAL index = 0 or 1, \n\tWHAT HAPPENS IS THAT THE ORACLE WILL MARK ABOUT N-1 OR N-2 TERMS IN THE SUPERPOSITION\n\tAND SO, THE AVERAGE WILL BECOME NEGATIVE, AND THE INVERSION ABOUT MEAN OPERATOR WILL \n\tNOT WORK PROPERLY, MEANING THAT THE PROBABILITY OF COLLAPSING INTO 0 OR 1 INSTEAD OF \n\tOF AN ACTUAL SOLUTION WILL BECOME LARGER AND LARGER MAKING REACHING THE MAXIMUM IMPOSSIBLE\n\tIN THESE CASES RANDOMLY CHOOSING AN INTIAL GUESS, IS THE BEST STRATEGY FOR ACHIEVING \n\tTHE MAXIMUM.\n############################################################################################## \n'''\n\nindex = int(sys.argv[1])\nalg = QMF(circuit=qc,search_register=qstate,size=superpositions,max_index=index,extra_registers=[new_qr])\n\nBackend = Aer.get_backend('qasm_simulator')\nmaximum, top_measurement = alg.run(backend=Backend,shots=1024)\n\n#measured_val = result['result']\n#maximum = result['top_measurement']\n\nprint(\"MAXIMUM SHOULD BE |\",np.binary_repr(maximum_expected,n),\"> = %d\" % maximum_expected)\nprint(\"MAXIMUM ACHIEVED - %d\\n\" % maximum)\nprint(\"TOP MEASUREMENT\\n\",top_measurement)\n\n\n#######################################################################\n############ QMF TEST FOR AN ARBITRAY CUSTOM INTIAL STATE #############\n########################## FUNCTIONAL 
#################################\n#######################################################################\n'''\nstate vector array -> numpy array [[Real,imaginary], ... ] for all superposition states\nFor n=4 qubits the first term encodes the amplitude of the state |0000>, the second term encodes |0001>, and so on.\n\nFor n=4 I'll create the state |s> = 1/sqrt(2) ( |0100> + |0010> ) and QMF should return\n|0100> = 4 as the maximum value within the state.\n'''\n\nstate=[]\nfor i in range(2**n):\n\tif i == 4 or i == 2:\n\t\tstate.append(complex(1/math.sqrt(2),0.0))\n\telse:\n\t\tstate.append(complex(0.0,0.0))\n\nstate = np.asarray(state)\n\nquantum_state = Custom(n,state_vector=state)\n\nindex = int(sys.argv[1])\nalg = QMF(initial_state=quantum_state,search_register=qstate,size=superpositions,max_index=index)\n\nBackend = Aer.get_backend('qasm_simulator')\nmaximum, top_measurement = alg.run(backend=Backend,shots=1024)\n\n#measured_val = result['result']\n#maximum = result['top_measurement']\n\nprint(\"MAXIMUM SHOULD BE |0100> = 4\\n\")\nprint(\"MAXIMUM ACHIEVED - %d\\n\" % maximum)\nprint(\"TOP MEASUREMENT\\n\",top_measurement)\n\n\n###########################################################################\n############ QMF TEST FOR A STATE THATS NOT IN SUPERPOSITION ##############\n########################## FUNCTIONAL #####################################\n###########################################################################\n'''\nTest if the quantum maximum finding algorithm can find the maximum in a register\nthat's not in superposition, qstate, which is, however, conditioned by another register, h_state, that's in superposition.\n\nqc has 2 CNOT gates, so the result should be |psi> = |11>|11>, giving the maximum on the qstate register of |qstate> = |11> = 3\n'''\nh_state = QuantumRegister(2,name=\"hstate\")\nqstate = QuantumRegister(2,name=\"qstate\")\nqc = QuantumCircuit(h_state,qstate)\n\nh_size = 4\n\nqc.h(h_state)\nqc.cx(h_state[0],qstate[0])\nqc.cx(h_state[1],qstate[1])\n\nalg = QMF(circuit=qc,search_register=qstate,size=h_size,extra_registers=[h_state],draw_circuit=True)\n\nBackend = Aer.get_backend('qasm_simulator')\nmaximum, top_measurement = alg.run(backend=Backend,shots=1024)\n\n#measured_val = result['result']\n#maximum = result['top_measurement']\n\nprint(\"MAXIMUM SHOULD BE |11> = 3\\n\")\nprint(\"MAXIMUM ACHIEVED - %d\\n\" % maximum)\nprint(\"TOP MEASUREMENT\\n\",top_measurement)\n\n", "sub_path": "qmf_test.py", "file_name": "qmf_test.py", "file_ext": "py", "file_size_in_byte": 5153, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "qiskit.QuantumRegister", "line_number": 27, "usage_type": "call"}, {"api_name": "qiskit.QuantumRegister", "line_number": 28, "usage_type": "call"}, {"api_name": "qiskit.QuantumCircuit", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 52, "usage_type": "attribute"}, {"api_name": "qAlgorithms.QMF", "line_number": 53, "usage_type": "call"}, {"api_name": "qiskit.Aer.get_backend", "line_number": 55, "usage_type": "call"}, {"api_name": "qiskit.Aer", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.binary_repr", "line_number": 61, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 85, "usage_type": "call"}, {"api_name": "qiskit.aqua.components.initial_states.Custom", "line_number": 87, "usage_type": "call"}, {"api_name": "sys.argv", "line_number":
89, "usage_type": "attribute"}, {"api_name": "qAlgorithms.QMF", "line_number": 90, "usage_type": "call"}, {"api_name": "qiskit.Aer.get_backend", "line_number": 92, "usage_type": "call"}, {"api_name": "qiskit.Aer", "line_number": 92, "usage_type": "name"}, {"api_name": "qiskit.QuantumRegister", "line_number": 113, "usage_type": "call"}, {"api_name": "qiskit.QuantumRegister", "line_number": 114, "usage_type": "call"}, {"api_name": "qiskit.QuantumCircuit", "line_number": 115, "usage_type": "call"}, {"api_name": "qAlgorithms.QMF", "line_number": 123, "usage_type": "call"}, {"api_name": "qiskit.Aer.get_backend", "line_number": 125, "usage_type": "call"}, {"api_name": "qiskit.Aer", "line_number": 125, "usage_type": "name"}]} {"seq_id": "408030488", "text": "from TexasHoldemConstants import *\nfrom TexasHoldemDeck import *\nfrom Player import *\nfrom TexasHoldemComparisonEngine import *\nfrom random import randint\nimport time\n\nimport pymongo\nfrom bson.objectid import ObjectId\n\nclass TexasHoldem:\n def __init__(self,\n humans=0,\n bots=2,\n ante=0,\n buyIn=None,\n minimumFirstBet=10,\n statisticsMode=False,\n clearDB=False,\n pymongoClientHost=\"localhost\",\n pymongoClientPort=27017,\n autoStart=True,\n verbose=False,\n debug=False,\n showBotCards=False,\n readable=False,\n readTime=3\n ):\n\n if(buyIn is None):\n buyIn = 500\n\n self.Verbose = verbose\n self.Debug = debug\n self.ShowBotCards = (self.Verbose and showBotCards)\n self.Readable = readable\n self.ReadTime = readTime\n self.PlayerCount = humans + bots\n self.Humans = humans\n self.Bots = bots\n self.BUY_IN = buyIn\n self.MINIMUM_FIRST_BET = minimumFirstBet\n self.Deck = TexasHoldemDeck()\n self.FlopCards = []\n self.TurnCard = None\n self.RiverCard = None\n self.CommunityCards = []\n self.Pot = 0\n self.Rounds = 0\n self.Comparison = TexasHoldemComparisonEngine()\n self.StatisticsMode = statisticsMode\n self.PymongoClientHost = pymongoClientHost\n self.PymongoClientPort = pymongoClientPort\n self.PymongoClient = pymongo.MongoClient(self.PymongoClientHost, self.PymongoClientPort)\n self.PymongoDB = self.PymongoClient.texasholdem\n self.GameId = None\n self.RoundId = None\n self.HumansInRound = 0\n self.FirstBetMade = False\n self.MinimumBet = 0\n\n if self.Debug:\n self.Verbose = True\n\n if clearDB:\n self.PymongoDB.hands.drop()\n self.PymongoDB.winning_hands.drop()\n self.PymongoDB.split_hands.drop()\n self.PymongoDB.losing_hands.drop()\n self.PymongoDB.games.drop()\n self.PymongoDB.rounds.drop()\n\n\n if(autoStart):\n self.NewGame()\n\n def CanShowCards(self, player):\n if self.HumansInRound <= 0:\n return True\n\n if player.IsHuman():\n return True\n\n if player.IsBot() and self.ShowBotCards:\n return True\n\n if self.RoundSection == SHOWDOWN:\n return True\n\n return False\n\n def __str__(self):\n s = \"\\nGame Status: \\n\"\n\n if(len(self.FlopCards) > 0):\n s += \"Flop: \"\n for c in range(len(self.FlopCards)):\n card = self.FlopCards[c]\n s += str(card)\n if(not c == len(self.FlopCards)-1):\n s += ', '\n s += \"\\n\"\n\n if(self.TurnCard is not None):\n s += \"Turn: \" + str(self.TurnCard) + \"\\n\"\n\n if(self.RiverCard is not None):\n s += 'River: ' + str(self.RiverCard) + \"\\n\"\n\n if(len(self.Players) > 0):\n s += '\\n'\n for p in range(len(self.Players)):\n player = self.Players[p]\n s += player.Name + ' (' + str(player.Chips) + '): '\n if self.CanShowCards(player):\n for c in range(len(player.Hand)):\n s += str(player.Hand[c])\n if(not c == len(player.Hand) - 1):\n s += ', '\n\n if(player.HandValue >= 
HIGH_CARD):\n s += ' (' + str(self.Comparison.HandValueString(player.HandValue)) + ')'\n\n if(player.Folded):\n s += ' [FOLDED]'\n\n s += '\\n'\n\n return s\n\n def AddPlayer(self, name=None, buyIn=None, controlled=False):\n playerNumber = len(self.Players)\n\n if(buyIn is None):\n buyIn = self.BUY_IN\n\n if(name is None):\n name = \"Player \" + str(playerNumber+1)\n\n self.Players.append(Player(playerNumber=playerNumber, name=name, chips=buyIn, controlled=controlled))\n\n def Deal(self):\n if self.Verbose:\n print(\"-------------------------------- DEAL -----------------------------------------\")\n\n for r in range(2):\n for i in range(len(self.Players)):\n player = self.Players[i]\n card = self.Deck.DrawCardFromTop()\n player.TakeCard(card)\n\n if(self.Verbose and self.CanShowCards(player)):\n print(player.Name + ' dealt ' + str(card))\n\n def PlayerBet(self, player):\n if self.Debug and self.Verbose:\n print(player.Name, ' is making a player bet')\n\n bet = 0\n if player.Chips > 1:\n if player.IsHuman():\n while bet < self.MINIMUM_FIRST_BET:\n betInput = input(player.Name + ', place a bet: ')\n bet = int(betInput)\n\n if bet > player.Chips:\n bet = player.Chips\n\n if bet < self.MINIMUM_FIRST_BET and self.Verbose:\n print(player.Name, ', you must bet at least: ', self.MINIMUM_FIRST_BET)\n else:\n while bet < 1 or bet > player.Chips:\n minBet = self.MinimumBet\n maxBet = self.MinimumBet + int(0.5*player.Chips) + 1\n bet = randint(minBet, maxBet)\n\n if self.Debug and self.Verbose:\n print('Min Bet: ', minBet, ', Max Bet: ', maxBet, ', Bet: ', bet)\n else:\n bet = player.Chips\n\n if self.Debug and self.Verbose:\n print(player.Name, ' only has ', bet, ' chips to bet')\n\n self.Pot += player.PlaceBet(bet)\n self.MinimumBet = bet\n\n self.FirstBetMade = True\n\n if self.Verbose:\n print(player.Name, ' bets ', bet)\n\n def PlayerCall(self, player):\n self.Pot += player.Call(self.MinimumBet)\n\n if self.Verbose:\n print(player.Name, ' calls')\n\n def PlayerCheck(self, player):\n player.Check()\n\n if self.Verbose:\n print(player.Name, ' checks')\n\n def PlayerRaise(self, player):\n if self.Debug and self.Verbose:\n print('Player has chosen to raise')\n\n if player.Chips > self.MinimumBet:\n raiseChips = 0\n if player.IsHuman():\n while raiseChips <= 0:\n raiseChips = int(input(player.Name + ', enter the amount you would like to raise by: '))\n else:\n while raiseChips < 1 or raiseChips > (player.Chips):\n minRaise = self.MinimumBet + 1\n maxRaise = player.Chips\n raiseChips = randint(minRaise, maxRaise)\n\n if self.Debug and self.Verbose:\n print('Min Raise: ', minRaise, ', Max Raise: ', maxRaise, ', Actual Raise: ', raiseChips)\n\n self.Pot += player.Raise(self.MinimumBet, raiseChips)\n self.MinimumBet = self.MinimumBet + raiseChips\n\n if self.Verbose:\n print(player.Name, ' raises by ', raiseChips)\n else:\n # assume they meant 'call'\n self.Pot += player.Call(self.MinimumBet)\n\n if self.Verbose:\n print('Player could not raise, called instead')\n\n def PlayerFold(self, player):\n player.Fold()\n\n if player.IsHuman():\n self.HumansInRound -= 1\n\n if self.Verbose:\n print(player.Name, ' folds')\n\n def TakeBets(self):\n if self.Verbose:\n print(\"------------------------------- TAKING BETS ------------------------------------\")\n\n self.FirstBetMade = False\n doneBetting = False\n self.MinimumBet = 0\n # while not doneBetting:\n\n if self.Debug and self.Verbose:\n print('About to take first round of bets')\n\n for p in range(len(self.Players)):\n player = self.Players[p]\n\n if not 
player.Folded:\n if not self.FirstBetMade:\n if self.Debug and self.Verbose:\n print('Have not made first bet')\n\n # options are to bet, check\n check = False\n if player.IsHuman():\n checkBet = input(player.Name + ' check or bet? (K/b)')\n if checkBet == 'k' or checkBet == 'K' or checkBet == 'c' or checkBet == 'C':\n check = True\n else:\n check = (randint(0, 10) == 5)\n\n if check and not self.StatisticsMode:\n self.PlayerCheck(player)\n else:\n self.PlayerBet(player)\n else:\n if self.Debug and self.Verbose:\n print('We have made the first bet')\n\n # options are to call, raise, check, or fold\n calls = False\n raises = False\n checks = False\n folds = True\n\n if player.IsHuman():\n callRaiseCheckFold = input(player.Name + ', would you like to call, raise, check, or fold? (C/r/k/f)')\n if callRaiseCheckFold == 'C' or callRaiseCheckFold == 'c':\n calls = True\n elif callRaiseCheckFold == 'R' or callRaiseCheckFold == 'r':\n raises = True\n elif callRaiseCheckFold == 'K' or callRaiseCheckFold == 'k':\n checks = True\n else:\n calls = (randint(0, 1) == 1)\n raises = (randint(0, 1) == 1)\n checks = (randint(0, 1) == 1)\n\n if calls or self.StatisticsMode:\n self.PlayerCall(player)\n elif raises:\n self.PlayerRaise(player)\n elif checks:\n self.PlayerCheck(player)\n else:\n self.PlayerFold(player)\n\n if not self.EverybodyFolded():\n for p in range(len(self.Players)):\n if self.EverybodyFolded():\n break\n else:\n player = self.Players[p]\n if (not player.Folded) and player.Checked:\n if not self.FirstBetMade:\n # options are to bet or fold\n bets = False\n if player.IsHuman():\n betFold = input(player.Name + ', either bet or fold (B/f): ')\n if betFold == 'B' or betFold == 'b':\n bets = True\n else:\n bets = False\n else:\n bets = (randint(0, 1) == 1)\n\n if bets or self.StatisticsMode:\n self.PlayerBet(player)\n else:\n self.PlayerFold(player)\n\n else:\n # options are call, raise, fold\n calls = False\n raises = False\n checks = False\n folds = True\n\n if player.IsHuman():\n callRaiseFold = input(player.Name + ', would you like to call, raise, or fold? 
(C/r/k/f)')\n if callRaiseFold == 'C' or callRaiseFold == 'c':\n calls = True\n elif callRaiseFold == 'R' or callRaiseFold == 'r':\n raises = True\n else:\n calls = (randint(0, 1) == 1)\n raises = (randint(0, 1) == 1)\n\n if calls or self.StatisticsMode:\n self.PlayerCall(player)\n elif raises:\n self.PlayerRaise(player)\n else:\n self.PlayerFold(player)\n\n if not self.EverybodyFolded():\n for p in range(len(self.Players)):\n if self.EverybodyFolded():\n break\n else:\n player = self.Players[p]\n if not (player.Bet >= self.MinimumBet or player.Folded):\n # if they aren't up to bet, they need to meet the raise\n calls = False\n if player.IsHuman():\n callFold = input(player.Name + ', either call the raise amount or fold (C/f): ')\n\n if callFold == 'C' or callFold == 'c':\n calls = True\n\n else:\n calls = (randint(0, 1) == 1)\n\n if calls or self.StatisticsMode:\n self.PlayerCall(player)\n else:\n self.PlayerFold(player)\n\n # now set all of the checked players back to normal Status\n for p in range(len(self.Players)):\n self.Players[p].Checked = False\n self.Players[p].Bet = 0\n\n\n def Flop(self):\n if self.Verbose:\n print(\"------------------------------------ FLOP -------------------------------------\")\n\n self.Deck.BurnCard()\n for c in range(3):\n card = self.Deck.DrawCardFromTop()\n self.FlopCards.append(card)\n\n if self.Verbose:\n print('Flop Card ' + str(c) + ': ' + str(card))\n\n self.UpdateCommunityCards()\n\n def Turn(self):\n if self.Verbose:\n print(\"------------------------------------- TURN -------------------------------------\")\n\n self.Deck.BurnCard()\n self.TurnCard = self.Deck.DrawCardFromTop()\n self.UpdateCommunityCards()\n\n if self.Verbose:\n print('Turn Card: ' + str(self.TurnCard))\n\n def River(self):\n if self.Verbose:\n print(\"--------------------------------- RIVER -----------------------------------------\")\n\n self.Deck.BurnCard()\n self.RiverCard = self.Deck.DrawCardFromTop()\n self.UpdateCommunityCards()\n\n if self.Verbose:\n print('River Card: ' + str(self.RiverCard))\n\n def UpdateCommunityCards(self):\n self.CommunityCards = []\n if(len(self.FlopCards) > 0):\n for c in range(len(self.FlopCards)):\n self.CommunityCards.append(self.FlopCards[c])\n\n if(not self.TurnCard is None):\n self.CommunityCards.append(self.TurnCard)\n\n if(not self.RiverCard is None):\n self.CommunityCards.append(self.RiverCard)\n\n def SortPlayersByHandValue(self):\n n = len(self.Players)\n\n swapped = False\n for i in range(n):\n swapped = False\n for j in range(0, n-1):\n shouldSwap = False\n\n if(self.Players[j].HandValue > self.Players[j+1].HandValue):\n shouldSwap = True\n else:\n if(self.Players[j].HandValue == self.Players[j+1].HandValue):\n handComparison = self.Comparison.CompareHandValue(self.Players[j].Hand, self.Players[j+1].Hand, self.CommunityCards, self.Players[j].HandValue)\n if(handComparison == 1):\n shouldSwap = True\n elif(handComparison == 0):\n self.Players[j].SameHandValueAs = self.Players[j+1].PlayerNumber\n self.Players[j+1].SameHandValueAs = self.Players[j].PlayerNumber\n\n if(shouldSwap):\n tempPlayer = self.Players[j+1]\n self.Players[j+1] = self.Players[j]\n self.Players[j] = tempPlayer\n swapped = True\n\n if not swapped:\n break\n\n for i in range(n):\n swapped = False\n for j in range(0, n-1):\n shouldSwap = False\n\n if(self.Players[j+1].Folded):\n shouldSwap = True\n\n if(shouldSwap):\n tempPlayer = self.Players[j+1]\n self.Players[j+1] = self.Players[j]\n self.Players[j] = tempPlayer\n swapped = True\n\n if not swapped:\n break\n\n 
self.Players = self.Players[::-1]\n\n def Showdown(self):\n if self.Verbose:\n print(\"----------------------------- SHOWDOWN ----------------------------------------\")\n\n highestPlayer = None\n\n for p in range(len(self.Players)):\n player = self.Players[p]\n handValue = self.Comparison.HandValue(player.Hand, self.CommunityCards)\n self.Players[p].HandValue = handValue\n\n self.SortPlayersByHandValue()\n\n leader = self.Players[0]\n winners = []\n winners.append(leader)\n p = 1\n while p < len(self.Players):\n player = self.Players[p]\n if(player.SameHandValueAs == leader.PlayerNumber):\n winners.append(player)\n\n p += 1\n\n return winners\n\n def EverybodyFolded(self):\n playersFolded = 0\n p = 0\n while p < len(self.Players):\n if self.Players[p].Folded:\n playersFolded += 1\n\n p += 1\n\n\n if(playersFolded >= len(self.Players) - 1):\n return True\n else:\n return False\n\n def NewGame(self):\n self.Deck = TexasHoldemDeck()\n self.FlopCards = []\n self.TurnCard = None\n self.RiverCard = None\n self.CommunityCards = []\n self.Pot = 0\n self.Rounds = 0\n\n names = [\n 'Josh',\n 'Janae',\n 'Michelle',\n 'Tyson',\n 'Chollin'\n ]\n\n self.Players = []\n for i in range(self.Bots):\n r = randint(0, len(names)-1)\n name = names[r]\n names.pop(r)\n self.AddPlayer(name=name)\n\n for i in range(self.Humans):\n r = randint(0, len(names)-1)\n name = names[r]\n names.pop(r)\n # self.AddPlayer(name=name, controlled=True)\n self.AddPlayer(name=name, controlled=True)\n\n self.PlayersOut = []\n\n if self.StatisticsMode:\n self.GameId = self.PymongoDB.games.insert_one({\n 'players': len(self.Players)\n }).inserted_id\n\n self.Play()\n\n def CommunityCardsToJSON(self):\n json = {}\n json['flop'] = {}\n if self.FlopCards[0] and self.FlopCards[1] and self.FlopCards[2]:\n json['flop']['0'] = self.FlopCards[0].ToJSON()\n json['flop']['1'] = self.FlopCards[1].ToJSON()\n json['flop']['2'] = self.FlopCards[2].ToJSON()\n\n if self.TurnCard:\n json['turn'] = self.TurnCard.ToJSON()\n\n if self.RiverCard:\n json['river'] = self.RiverCard.ToJSON()\n\n return json\n\n def Wait(self):\n if(self.ReadTime < 1):\n input('')\n else:\n time.sleep(self.ReadTime)\n\n def Play(self):\n self.Rounds = 0\n\n while True:\n if(self.StatisticsMode):\n roundPlayers = []\n for p in range(len(self.Players)):\n roundPlayers.append(self.Players[p])\n\n self.RoundId = self.PymongoDB.rounds.insert_one({\n 'players': len(self.Players),\n 'gameId': self.GameId\n }).inserted_id\n\n self.HumansInRound = 0\n for p in range(len(self.Players)):\n player = self.Players[p]\n if player.IsHuman():\n self.HumansInRound += 1\n\n # self.AnteUp()\n\n self.RoundSection = DEAL\n self.Deal()\n self.TakeBets()\n if self.Verbose:\n print(self)\n if self.Readable:\n self.Wait()\n\n if not self.EverybodyFolded():\n self.RoundSection = FLOP\n self.Flop()\n self.TakeBets()\n if self.Verbose:\n print(self)\n if(self.Readable):\n self.Wait()\n\n\n if not self.EverybodyFolded():\n self.RoundSection = TURN\n self.Turn()\n self.TakeBets()\n if self.Verbose:\n print(self)\n if self.Readable:\n self.Wait()\n\n if not self.EverybodyFolded():\n self.RoundSection = RIVER\n self.River()\n self.TakeBets()\n if self.Verbose:\n print(self)\n if self.Readable:\n self.Wait()\n\n if not self.EverybodyFolded():\n self.RoundSection = SHOWDOWN\n roundWinners = self.Showdown()[:]\n if self.Verbose:\n print(self)\n if self.Readable:\n self.Wait()\n\n numWinners = len(roundWinners)\n\n extraChips = self.Pot % numWinners\n self.Pot -= extraChips\n distributedChips = self.Pot / 
numWinners\n\n for p in range(len(roundWinners)):\n player = roundWinners[p]\n player.Chips += distributedChips\n\n if numWinners > 1:\n for p in range(len(roundWinners)):\n splitter = roundWinners[p]\n if self.StatisticsMode:\n self.PymongoDB.split_hands.insert_one({\n 'player': splitter.ToJSON(),\n 'community': self.CommunityCardsToJSON(),\n 'splitAmong': numWinners,\n 'playersPlayed': len(self.Players),\n 'everybodyFolded': False,\n 'gameId': self.GameId,\n 'roundId': self.RoundId\n })\n else:\n winner = roundWinners[0]\n if self.StatisticsMode:\n self.PymongoDB.winning_hands.insert_one({\n 'player': winner.ToJSON(),\n 'community': self.CommunityCardsToJSON(),\n 'playersPlayed': len(self.Players),\n 'everybodyFolded': False,\n 'gameId': self.GameId,\n 'roundId': self.RoundId\n })\n\n winner.Chips += self.Pot\n\n for p in range(len(self.Players)):\n player = self.Players[p]\n\n isWinner = False\n for w in range(len(roundWinners)):\n if player.PlayerNumber == roundWinners[w].PlayerNumber:\n isWinner = True\n break\n\n if not isWinner and self.StatisticsMode:\n self.PymongoDB.losing_hands.insert_one({\n 'player': player.ToJSON(),\n 'community': self.CommunityCardsToJSON(),\n 'playersPlayed': len(self.Players),\n 'everybodyFolded': False,\n 'gameId': self.GameId,\n 'roundId': self.RoundId\n })\n else:\n for p in range(len(self.Players)):\n if not self.Players[p].Folded:\n roundWinner = self.Players[p]\n break\n\n roundWinner.Chips += self.Pot\n\n if self.StatisticsMode:\n print(self.EverybodyFolded())\n self.PymongoDB.winning_hands.insert_one({\n 'player': roundWinner.ToJSON(),\n 'community': self.CommunityCardsToJSON(),\n 'playersPlayed': len(self.Players),\n 'everybodyFolded': True,\n 'gameId': self.GameId,\n 'roundId': self.RoundId\n })\n\n # reset everything for the next round\n self.FlopCards = []\n self.TurnCard = None\n self.RiverCard = None\n self.CommunityCards = []\n self.Deck = TexasHoldemDeck()\n self.Pot = 0\n\n p = 0\n while p < len(self.Players):\n player = self.Players[p]\n if(player.Chips <= 0):\n self.PlayersOut.append(player)\n self.Players.pop(p)\n if self.Verbose:\n print(player.Name, ' is out')\n\n p += 1\n\n if len(self.Players) > 1:\n for p in range(len(self.Players)):\n self.Players[p].ResetHand()\n self.Players[p].Folded = False\n self.Players[p].Checked = False\n self.Players[p].SameHandValueAs = -1\n else:\n break\n\n self.Rounds += 1\n if self.Verbose:\n print(self)\n\n self.Winner = self.Players[0]\n if self.Verbose:\n print(\"------------------------------- GAME OVER -------------------------------------\")\n print('Winner: ')\n print(self.Winner)\n if self.StatisticsMode:\n self.PymongoDB.games.update_one(\n {'_id': self.GameId},\n {\n '$set': {\n 'winner': self.Winner.ToJSON()\n }\n }\n )\n\n\n# print(\"-------------------------------------------------------------------------------\")\n\n\nth = TexasHoldem(humans=1, bots=2, statisticsMode=False, autoStart=True, clearDB=False, verbose=True, readable=True, readTime=-1, debug=False)\n# th = TexasHoldem(humans=0, bots=4, statisticsMode=True, clearDB=False, verbose=False, readable=False, readTime=-1, debug=False, autoStart=False)\n# c = 0\n# while c < 50000:\n# th.NewGame()\n# if c % 100 == 0:\n# print('game', c, 'rounds', th.Rounds, 'winner', th.Winner.Name)\n# c += 1\n", "sub_path": "final/TexasHoldem.py", "file_name": "TexasHoldem.py", "file_ext": "py", "file_size_in_byte": 27131, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": 
"pymongo.MongoClient", "line_number": 54, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 175, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 218, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 271, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 296, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 297, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 298, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 326, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 347, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 348, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 373, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 542, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 548, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 583, "usage_type": "call"}]} {"seq_id": "45950980", "text": "import argparse\nimport os\n\nimport torch\nfrom torchtext import data, datasets\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image, make_grid\n\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom tqdm import tqdm\n\n\nSEED = 1234\ntorch.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = True\n\n# Prevent matplotlib can't run in a non-interactive mode\nimport matplotlib\nmatplotlib.use('Agg') \n\n\n#VAE MODEL\n\nclass VAE(nn.Module):\n def __init__(self, x_dim, h_dim1, h_dim2, z_dim):\n super(VAE, self).__init__()\n\n #Encoding layers\n self.fc1 = nn.Linear(x_dim, h_dim1)\n self.fc2 = nn.Linear(h_dim1, h_dim2)\n self.fc3_mu = nn.Linear(h_dim2, z_dim)\n self.fc3_log_var = nn.Linear(h_dim2, z_dim)\n\n #Decoding layers\n self.fc4 = nn.Linear(z_dim, h_dim2)\n self.fc5 = nn.Linear(h_dim2, h_dim1)\n self.fc6 = nn.Linear(h_dim1, x_dim)\n\n def encoder(self, x):\n \th = F.relu(self.fc1(x))\n \th = F.relu(self.fc2(h))\n \tmu = self.fc3_mu(h)\n \tlog_var = self.fc3_log_var(h)\n \treturn mu, log_var\n\n def reparameterize(self, mu, log_var):\n \tstd = torch.exp(0.5*log_var) # standard deviation\n \teps = torch.randn_like(std)\n \tsample = mu + eps * log_var\n \treturn sample\n\n def decoder(self, z):\n \th = F.relu(self.fc4(z))\n \th = F.relu(self.fc5(h))\n \treconstruction = torch.sigmoid(self.fc6(h))\n \treturn reconstruction\n\n def forward(self, x):\n \tmu, log_var = self.encoder(x.view(-1, 784))\n \tz = self.reparameterize(mu, log_var)\n \treturn self.decoder(z), mu, log_var \n\n\n\ndef main():\n\n\n\ttransform = transforms.Compose([\n\t transforms.ToTensor(),\n\t])\n\n\t# MNIST DATASET\n\n\ttrain_dataset = torchvision.datasets.MNIST(root='./DATA_MNIST',train=True, transform=transform, download=True)\n\ttest_dataset = torchvision.datasets.MNIST(root='./DATA_MNIST',train=False, transform=transform, download = True)\n\n\ttrain_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)\n\ttest_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=False)\n\n\n\t# Model + Loss function\n\t# z_dim = 2!\n\tmodel = VAE(x_dim=784, h_dim1= 512, h_dim2=256, z_dim=2).to(device)\n\toptimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)\n\n\t# Print Loss Result\n\ttrain_losses = []\n\ttrain_losses_BCE = []\n\ttrain_losses_KLD = 
[]\n\n\ttest_losses = []\n\ttest_losses_BCE = []\n\ttest_losses_KLD = []\n\n\tepoch_list = []\n\tfor epoch in range(EPOCHS):\n\t\tprint(f\"Epoch {epoch+1} of {EPOCHS}\")\n\t\ttrain_epoch_loss_BCE, train_epoch_loss_KLD, train_epoch_loss = train(model, optimizer, train_loader)\n\t\ttest_epoch_loss_BCE, test_epoch_loss_KLD, test_epoch_loss = evaluate(model, optimizer, test_loader, BATCH_SIZE, epoch)\n\t\ttrain_losses.append(train_epoch_loss)\n\t\ttrain_losses_BCE.append(train_epoch_loss_BCE)\n\t\ttrain_losses_KLD.append(train_epoch_loss_KLD)\n\t\ttest_losses.append(test_epoch_loss)\n\t\ttest_losses_BCE.append(test_epoch_loss_BCE)\n\t\ttest_losses_KLD.append(test_epoch_loss_KLD)\n\t\tepoch_list.append(epoch+1)\n\t\tprint(f\"Train Loss: {train_epoch_loss:.4f}, Test Loss: {test_epoch_loss:.4f}, Test BCE Loss: {test_epoch_loss_BCE:.4f}, Test KLD Loss: {train_epoch_loss_KLD:.4f}\")\n\n\tfig = plt.figure()\n\tplt.plot(epoch_list, train_losses, color='black', label = \"train loss\")\n\tplt.plot(epoch_list, test_losses, color='red', label = \"test loss\")\n\tplt.legend(loc='best', prop={'size': 10})\n\tplt.xlabel('Epoch')\n\tplt.ylabel('Reconstruction Loss and KL Divergence')\n\tplt.title('VAE_MNIST_Reconstruction_Loss_and_KL_Divergence')\n\tplt.savefig(f\"1VAE_MNIST_final_loss_epoch_2.png\")\n\tplt.close()\n\n\n\tfig = plt.figure()\n\tplt.plot(epoch_list, train_losses_BCE, color='black', label = \"train loss\")\n\tplt.plot(epoch_list, test_losses_BCE, color='red', label = \"test loss\")\n\tplt.legend(loc='best', prop={'size': 10})\n\tplt.xlabel('Epoch')\n\tplt.ylabel('Binary Cross Entropy')\n\tplt.title('VAE_MNIST_Reconstruction_Loss')\n\tplt.savefig(f\"1VAE_MNIST_BCE_loss_epoch_2.png\")\n\tplt.close()\n\n\n\n\tfig = plt.figure()\n\tplt.plot(epoch_list, train_losses_KLD, color='black', label = \"train loss\")\n\tplt.plot(epoch_list, test_losses_KLD, color='red', label = \"test loss\")\n\tplt.legend(loc='best', prop={'size': 10})\n\tplt.xlabel('Epoch')\n\tplt.ylabel(' KL Divergence')\n\tplt.title('VAE_MNIST_KL_Divergence')\n\tplt.savefig(f\"1VAE_MNIST_KLD_loss_epoch_2.png\")\n\tplt.close()\n\n\n\n\ndef loss_function(recon_x, x, mu, log_var):\n\tBCE = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction = 'sum')\n\t# VAE Gaussian KL Divergence\n\tKLD = -0.5 * torch.sum(1+ log_var - mu.pow(2) - log_var.exp())\n\n\treturn BCE, KLD, BCE + KLD\n\n\ndef train(model, optimizer, train_loader):\n\tmodel.train()\n\trunning_loss = 0.0\n\trunning_loss_BCE = 0.0\n\trunning_loss_KLD = 0.0\n\tfor batch_idx, (images, _ ) in tqdm(enumerate(train_loader), total = int(len(train_loader.dataset) / BATCH_SIZE)):\n\t\timages = images.to(device)\n\t\timages = images.reshape(-1, 784)\n\t\toptimizer.zero_grad()\n\t\trecon_batch, mu, log_var = model(images)\n\t\tBCE_loss, KLD_loss, final_loss = loss_function(recon_batch, images, mu, log_var)\n\t\tfinal_loss.backward()\n\t\trunning_loss += final_loss.item()\n\t\trunning_loss_BCE += BCE_loss.item()\n\t\trunning_loss_KLD += KLD_loss.item()\n\t\toptimizer.step()\n\ttrain_loss = running_loss/len(train_loader.dataset)\n\ttrain_loss_BCE = running_loss_BCE/len(train_loader.dataset)\n\ttrain_loss_KLD = running_loss_KLD/len(train_loader.dataset)\n\treturn train_loss_BCE, train_loss_KLD, train_loss\n\n\ndef evaluate(model, optimizer, test_loader, batch_size, epoch):\n\tmodel.eval()\n\trunning_loss = 0.0\n\trunning_loss_BCE = 0.0\n\trunning_loss_KLD = 0.0\n\twith torch.no_grad():\n\t\tfor batch_idx, (images, _ ) in enumerate(test_loader):\n\t\t\timages = 
images.to(device)\n\t\t\timages = images.reshape(-1, 784)\n\t\t\trecon_batch, mu, log_var = model(images)\n\t\t\tBCE_loss, KLD_loss, final_loss = loss_function(recon_batch, images, mu, log_var)\n\t\t\trunning_loss += final_loss.item()\n\t\t\trunning_loss_BCE += BCE_loss.item()\n\t\t\trunning_loss_KLD += KLD_loss.item()\n\n\t\t\tif batch_idx == int(len(test_loader.dataset)/batch_size) - 1:\n\t\t\t\t\trecon_batch_ = recon_batch.view(batch_size, 1, 28, 28)[:64]\n\t\t\t\t\tgenerated_img = make_grid(recon_batch_, padding =2, normalize = True)\n\t\t\t\t\tsave_generator_image(generated_img, f\"./1IMAGE_VAE_MNIST_2/epoch{epoch+1}.png\")\n\n\t\t\t\t\t# if epoch == EPOCHS-1:\n\t\t\t\t\t# \timage_batch = images.view(batch_size, 1, 28, 28)[:64]\n\t\t\t\t\t# \treal_img = make_grid(image_batch, padding =2, normalize = True)\n\t\t\t\t\t# \tsave_generator_image(real_img, f\"./IMAGE_MNIST/epoch{epoch+1}.png\")\n\n\ttest_loss = running_loss/len(test_loader.dataset)\n\ttest_loss_BCE = running_loss_BCE/len(test_loader.dataset)\n\ttest_loss_KLD = running_loss_KLD/len(test_loader.dataset)\n\treturn test_loss_BCE, test_loss_KLD, test_loss\n\n\n# to save the images generated by the generator\ndef save_generator_image(image, path):\n save_image(image, path)\n\n\n\nif __name__==\"__main__\":\n \n \n\t\t\t# Argument Parser\n\t\t\tparser = argparse.ArgumentParser()\n\t\t\tparser.add_argument(\"--gpu\", default=\"0\", type=str)\n\t\t\targs = parser.parse_args()\n\n\t\t\tos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \n\t\t\tos.environ[\"CUDA_VISIBLE_DEVICES\"]=args.gpu\n\t\t\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n\t\t\t# HYPER-PARAMETERS\n\t\t\tBATCH_SIZE = 128\n\t\t\tEPOCHS = 200\n\t\t\tLEARNING_RATE = 1e-3\n\n\t\t\t# latent vector z dimension = 2\n\n\t\t\tmain()", "sub_path": "HW4/1VAE_MNIST_2.py", "file_name": "1VAE_MNIST_2.py", "file_ext": "py", "file_size_in_byte": 7490, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "torch.manual_seed", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.backends", "line_number": 22, "usage_type": "attribute"}, {"api_name": "matplotlib.use", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 31, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 47, "usage_type": "name"}, 
{"api_name": "torch.nn.functional.relu", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.exp", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.randn_like", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 62, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 75, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 75, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 76, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 76, "usage_type": "name"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 81, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 81, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 82, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 84, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 128, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "torch.nn.functional.binary_cross_entropy", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 153, "usage_type": "name"}, {"api_name": "torch.sum", "line_number": 155, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 187, "usage_type": "call"}, {"api_name": "torchvision.utils.make_grid", "line_number": 199, "usage_type": "call"}, {"api_name": "torchvision.utils.save_image", "line_number": 215, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 223, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 227, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 228, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 229, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", 
"line_number": 229, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 229, "usage_type": "attribute"}]} {"seq_id": "188880154", "text": "# basic packages #\r\nimport os\r\nimport numpy as np\r\nimport tensorflow as tf\r\ntf.logging.set_verbosity(tf.logging.ERROR)\r\nimport pickle\r\n# trial generation and network building #\r\n\r\nimport sys\r\nsys.path.append('.')\r\nfrom utils import tools\r\n\r\nfrom task_and_network.task import generate_trials\r\nfrom task_and_network.network import Model\r\n\r\ndef gen_task_info(hp,\r\n log,\r\n model_dir, \r\n rules, \r\n return_trial_store=False,):\r\n task_info_file = model_dir+'/task_info.pkl'\r\n if os.path.isfile(task_info_file):\r\n with open(task_info_file,'rb') as tinfr:\r\n task_info = pickle.load(tinfr)\r\n else:\r\n task_info = dict()\r\n \r\n trial_store = dict()\r\n\r\n print(\"Epoch information:\")\r\n for rule in rules:\r\n task_info[rule] = dict()\r\n trial_store[rule] = generate_trials(rule, hp, 'test', noise_on=False)\r\n \r\n n_stims = len([ep for ep in trial_store[rule].epochs.keys() if 'stim' in ep])\r\n stim_loc_log_len = int(len(trial_store[rule].input_loc)/n_stims)\r\n task_info[rule]['in_loc'] = np.array([np.argmax(i) for i in trial_store[rule].input_loc[:stim_loc_log_len]])\r\n if n_stims != 1:\r\n for nst in range(2,n_stims+1):\r\n task_info[rule]['in_loc_'+str(nst)] = \\\r\n np.array([np.argmax(i) for i in trial_store[rule].input_loc[(nst-1)*stim_loc_log_len:nst*stim_loc_log_len]])\r\n\r\n task_info[rule]['in_loc_set'] = sorted(set(task_info[rule]['in_loc']))\r\n task_info[rule]['epoch_info'] = trial_store[rule].epochs\r\n print('\\t'+rule+':')\r\n for e_name, e_time in task_info[rule]['epoch_info'].items():\r\n print('\\t\\t'+e_name+':',e_time)\r\n\r\n with open(task_info_file,'wb') as tinf:\r\n pickle.dump(task_info, tinf)\r\n\r\n if return_trial_store:\r\n return trial_store\r\n\r\ndef compute_H_(hp, model_dir, rule, trial_num, trial=None, task_mode='test'):\r\n\r\n if trial is None:\r\n trial = generate_trials(rule, hp, task_mode, noise_on=False)\r\n \r\n sub_dir = model_dir+'/'+str(trial_num)+'/'\r\n model = Model(sub_dir, hp=hp)\r\n with tf.Session() as sess:\r\n model.restore()\r\n feed_dict = tools.gen_feed_dict(model, trial, hp)\r\n h = sess.run(model.h, feed_dict=feed_dict)\r\n return h\r\n\r\ndef compute_H(\r\n hp,\r\n log,\r\n model_dir, \r\n rules=None, \r\n trial_list=None, \r\n recompute=False,\r\n save_H_pickle=True,\r\n ):\r\n \r\n if rules is not None:\r\n rules = rules\r\n else:\r\n rules = hp['rule_trains']\r\n \r\n if trial_list is None:\r\n trial_list = log['trials']\r\n elif isinstance(trial_list, list):\r\n trial_list = trial_list \r\n\r\n trial_store = gen_task_info(hp,log,model_dir,rules,True,)\r\n\r\n for rule in rules:\r\n if isinstance(trial_list, dict):\r\n temp_list = list()\r\n for value in trial_list[rule].values():\r\n temp_list += value\r\n temp_list = sorted(set(temp_list))\r\n elif isinstance(trial_list, list):\r\n temp_list = trial_list\r\n\r\n for trial_num in temp_list:\r\n H_file = model_dir+'/'+str(trial_num)+'/H_'+rule+'.pkl'\r\n if recompute or not os.path.isfile(H_file):\r\n H_ = compute_H_(hp, model_dir, rule, trial_num, trial = trial_store[rule])\r\n with open(H_file,'wb') as wh:\r\n pickle.dump(H_,wh)\r\n\r\ndef Get_H(hp,model_dir,trial_num,rule,save_H=False,task_mode='test',):\r\n\r\n H_file = model_dir+'/'+str(trial_num)+'/H_'+rule+'.pkl'\r\n\r\n if os.path.isfile(H_file):\r\n with open(H_file,'rb') as hf:\r\n H = pickle.load(hf)\r\n\r\n else:\r\n H = 
compute_H_(hp, model_dir, rule, trial_num, trial=None, task_mode=task_mode)\r\n        if save_H:\r\n            with open(H_file,'wb') as wh:\r\n                pickle.dump(H,wh)\r\n    return H \r\n\r\n\r\n    ", "sub_path": "analysis/PSTH_compute_H.py", "file_name": "PSTH_compute_H.py", "file_ext": "py", "file_size_in_byte": 4002, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "tensorflow.logging.set_verbosity", "line_number": 5, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 5, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 24, "usage_type": "call"}, {"api_name": "task_and_network.task.generate_trials", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 41, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 50, "usage_type": "call"}, {"api_name": "task_and_network.task.generate_trials", "line_number": 58, "usage_type": "call"}, {"api_name": "task_and_network.network.Model", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 62, "usage_type": "call"}, {"api_name": "utils.tools.gen_feed_dict", "line_number": 64, "usage_type": "call"}, {"api_name": "utils.tools", "line_number": 64, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 112, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 118, "usage_type": "call"}]} {"seq_id": "365285813", "text": "from utils import Logger\nimport scipy.signal\nimport signal\nimport numpy as np\n\n\"\"\"\n\n    Adapted from code written by Patrick Coady (pat-coady.github.io)\n\n\"\"\"\n\nclass Agent(object):\n    def __init__(self,policy,val_func,env,input_normalizer,logger,policy_episodes=20,policy_steps=10,gamma1=0.0,gamma2=0.995,lam=0.98,\n                 normalize_advantages=True,use_tdlam=False,use_timestep=False,monitor=None, animate=False):\n        self.env = env\n        self.monitor = monitor\n        self.policy_steps = policy_steps\n        self.logger = logger\n        self.use_tdlam = use_tdlam\n        self.use_timestep = use_timestep\n        self.policy = policy \n        self.val_func = val_func\n        self.input_normalizer = input_normalizer\n        self.policy_episodes = policy_episodes\n        self.animate = animate \n        self.normalize_advantages = normalize_advantages \n        self.gamma1 = gamma1\n        self.gamma2 = gamma2\n\n        self.lam = lam\n        self.global_steps = 0\n        \n    \"\"\" \n\n    Args:\n        policy: policy object with update() and sample() methods\n        val_func: value function object with fit() and predict() methods\n        env: environment\n        input_normalizer: scaler object with apply(), reverse(), and update() methods\n        logger: Logger object\n\n        policy_episodes: number of episodes collected 
before update\n        policy_steps: minimum number of steps before update\n                   (will update when either episodes > policy_episodes or steps > policy_steps)\n\n        gamma1, gamma2: discount rates for the two reward components\n        lam: lambda for GAE calculation\n        normalize_advantages: boolean, normalizes advantages if True\n        use_tdlam: boolean, True uses TD lambda target for value function, else Monte Carlo \n        use_timestep: boolean, True enables time step feature which sometimes works better than a \n                      low discount rate for continuing tasks with per-step rewards (like Mujoco envs)\n        monitor: A monitor object like RL_stats to plot interesting stats as learning progresses\n                 Monitor object implements update_episode() and show() methods \n        animate: boolean, True uses env.render() method to animate episode\n\n    \"\"\" \n    \n    def run_episode(self):\n        \"\"\"\n\n        Returns: 4-tuple of NumPy arrays\n        observes: shape = (episode len, obs_dim)\n        actions: shape = (episode len, act_dim)\n        rewards: shape = (episode len,)\n        unscaled_obs: useful for training scaler, shape = (episode len, obs_dim)\n        \"\"\"\n        obs = self.env.reset()\n        observes, actions, rewards1, rewards2, unscaled_obs = [], [], [], [], []\n        done = False\n        step = 0.0\n        while not done:\n            if self.animate:\n                self.env.render()\n            obs = obs.astype(np.float64).reshape((1, -1))\n            unscaled_obs.append(obs.copy())\n            if self.input_normalizer is not None:\n                obs = self.input_normalizer.apply(obs)\n            if self.use_timestep:\n                obs = np.append(obs, [[step]], axis=1) # add time step feature\n            observes.append(obs)\n            action, env_action = self.policy.sample(obs)# .reshape((1, -1)).astype(np.float64) #[:,0:-1])\n            actions.append(action)\n            obs, reward, done, reward_info = self.env.step(env_action)\n            reward1 = reward[0]\n            reward2 = reward[1]\n            if not isinstance(reward1, float):\n                reward1 = np.asscalar(reward1)\n            if not isinstance(reward2, float):\n                reward2 = np.asscalar(reward2)\n            rewards1.append(reward1)\n            rewards2.append(reward2)\n            step += 1e-3 # increment time step feature\n        #logger.log({'Score': sum_rewards})\n        return (np.concatenate(observes), np.concatenate(actions), np.array(rewards1, dtype=np.float64), \n                np.array(rewards2, dtype=np.float64), np.concatenate(unscaled_obs))\n\n\n    def run_policy(self,episode_cnt,warmup=False):\n        \"\"\" Run policy and collect data for a minimum of policy_steps steps and policy_episodes episodes\n        Args:\n            episode_cnt: current episode number, used for logging stats \n\n        Returns: list of trajectory dictionaries, list length = number of episodes\n          'observes' : NumPy array of states from episode\n          'actions' : NumPy array of actions from episode\n          'rewards' : NumPy array of (un-discounted) rewards from episode\n          'unscaled_obs' : NumPy array of unscaled observations from episode\n        \"\"\"\n        total_steps = 0\n        e_cnt = 0\n        trajectories = []\n        #for e in range(self.policy_episodes):\n        while e_cnt <= self.policy_episodes or total_steps < self.policy_steps:\n            observes, actions, rewards1, rewards2, unscaled_obs = self.run_episode()\n            if self.monitor is not None and not warmup:\n                self.monitor.update_episode(np.sum(rewards1) + np.sum(rewards2), observes.shape[0])\n            total_steps += observes.shape[0]\n            trajectory = {'observes': observes,\n                          'actions': actions,\n                          'rewards1': rewards1,\n                          'rewards2': rewards2,\n                          'unscaled_obs': unscaled_obs}\n            trajectories.append(trajectory)\n            e_cnt += 1\n        unscaled = np.concatenate([t['unscaled_obs'] for t in trajectories])\n        if self.input_normalizer is not None:\n            self.input_normalizer.update(unscaled) # update running statistics for scaling observations\n\n        self.add_value(trajectories) # add 
estimated values to episodes\n self.add_disc_sum_rew(trajectories, self.gamma1, self.gamma2) # calculated discounted sum of Rs\n self.add_gae(trajectories, self.gamma1, self.gamma2, self.lam) # calculate advantage\n # concatenate all episodes into single NumPy arrays\n observes, actions, advantages, disc_sum_rew = self.build_train_set(trajectories)\n\n if not warmup:\n self.policy.update(observes, actions, advantages, self.logger) # update policy\n self.val_func.fit(observes, disc_sum_rew, self.logger) # update value function\n self.log_batch_stats(observes, actions, advantages, disc_sum_rew, episode_cnt)\n self.global_steps += total_steps\n self.logger.log({'_MeanReward': np.mean([t['rewards1'].sum() + t['rewards2'].sum() for t in trajectories]),\n '_StdReward': np.std([t['rewards1'].sum() + t['rewards2'].sum() for t in trajectories]),\n '_MinReward': np.min([t['rewards1'].sum() + t['rewards2'].sum() for t in trajectories]),\n 'Steps': total_steps,\n 'TotalSteps' : self.global_steps})\n if self.monitor is not None: \n self.monitor.show()\n return trajectories\n\n def train(self,train_episodes, train_samples=None):\n _ = self.run_policy(-1,warmup=True)\n print('*** SCALER WARMUP COMPLETE *** ')\n print(np.sqrt(self.input_normalizer.vars))\n episode = 0\n \n if train_samples is not None:\n while self.global_steps < train_samples:\n trajectories = self.run_policy(episode)\n self.logger.write(display=True)\n episode += len(trajectories)\n else: \n while episode < train_episodes: \n trajectories = self.run_policy(episode)\n self.logger.write(display=True)\n episode += len(trajectories)\n \n \n def discount(self,x, gamma):\n \"\"\" Calculate discounted forward sum of a sequence at each point \"\"\"\n return scipy.signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]\n\n\n def add_disc_sum_rew(self,trajectories, gamma1, gamma2):\n \"\"\" Adds discounted sum of rewards to all time steps of all trajectories\n\n Args:\n trajectories: as returned by run_policy()\n gamma: discount\n\n Returns:\n None (mutates trajectories dictionary to add 'disc_sum_rew')\n \"\"\"\n for trajectory in trajectories:\n if gamma1 < 0.999: # don't scale for gamma ~= 1\n rewards1 = trajectory['rewards1'] * (1 - gamma1)\n else:\n rewards1 = trajectory['rewards1']\n\n if gamma2 < 0.999: # don't scale for gamma ~= 1\n rewards2 = trajectory['rewards2'] * (1 - gamma2)\n else:\n rewards2 = trajectory['rewards2']\n\n disc_sum_rew1 = self.discount(rewards1, gamma1)\n disc_sum_rew2 = self.discount(rewards2, gamma2)\n\n trajectory['disc_sum_rew'] = disc_sum_rew1 + disc_sum_rew2\n\n\n def add_value(self,trajectories):\n \"\"\" Adds estimated value to all time steps of all trajectories\n\n Args:\n trajectories: as returned by run_policy()\n val_func: object with predict() method, takes observations\n and returns predicted state value\n\n Returns:\n None (mutates trajectories dictionary to add 'values')\n \"\"\"\n for trajectory in trajectories:\n observes = trajectory['observes']\n values = self.val_func.predict(observes)\n trajectory['values'] = values\n\n\n def add_gae(self,trajectories, gamma1, gamma2, lam):\n \"\"\" Add generalized advantage estimator.\n https://arxiv.org/pdf/1506.02438.pdf\n\n Args:\n trajectories: as returned by run_policy(), must include 'values'\n key from add_value().\n gamma: reward discount\n lam: lambda (see paper).\n lam=0 : use TD residuals\n lam=1 : A = Sum Discounted Rewards - V_hat(s)\n\n Returns:\n None (mutates trajectories dictionary to add 'advantages')\n \"\"\"\n for trajectory in trajectories:\n 
advantages = trajectory['disc_sum_rew'] - trajectory['values']\n trajectory['advantages'] = advantages\n\n def build_train_set(self,trajectories):\n \"\"\"\n\n Args:\n trajectories: trajectories after processing by add_disc_sum_rew(),\n add_value(), and add_gae()\n\n Returns: 4-tuple of NumPy arrays\n observes: shape = (N, obs_dim)\n actions: shape = (N, act_dim)\n advantages: shape = (N,)\n disc_sum_rew: shape = (N,)\n \"\"\"\n observes = np.concatenate([t['observes'] for t in trajectories])\n actions = np.concatenate([t['actions'] for t in trajectories])\n disc_sum_rew = np.concatenate([t['disc_sum_rew'] for t in trajectories])\n advantages = np.concatenate([t['advantages'] for t in trajectories])\n # normalize advantages\n if self.normalize_advantages:\n advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-6)\n else:\n advantages = advantages - advantages.mean()\n\n return observes, actions, advantages, disc_sum_rew\n\n def log_batch_stats(self,observes, actions, advantages, disc_sum_rew, episode):\n \"\"\" Log various batch statistics \"\"\"\n self.logger.log({'_mean_obs': np.mean(observes),\n '_min_obs': np.min(observes),\n '_max_obs': np.max(observes),\n '_std_obs': np.mean(np.var(observes, axis=0)),\n '_mean_act': np.mean(actions),\n '_min_act': np.min(actions),\n '_max_act': np.max(actions),\n '_std_act': np.mean(np.var(actions, axis=0)),\n '_mean_adv': np.mean(advantages),\n '_min_adv': np.min(advantages),\n '_max_adv': np.max(advantages),\n '_std_adv': np.var(advantages),\n '_mean_discrew': np.mean(disc_sum_rew),\n '_min_discrew': np.min(disc_sum_rew),\n '_max_discrew': np.max(disc_sum_rew),\n '_std_discrew': np.var(disc_sum_rew),\n '_Episode': episode\n })\n\n", "sub_path": "RL_lib/Agents/PPO/agent_mdr2.py", "file_name": "agent_mdr2.py", "file_ext": "py", "file_size_in_byte": 12418, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "numpy.float64", "line_number": 74, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.asscalar", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.asscalar", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 94, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 152, "usage_type": "call"}, {"api_name": "scipy.signal.signal.lfilter", "line_number": 169, "usage_type": "call"}, {"api_name": "scipy.signal.signal", "line_number": 169, "usage_type": "attribute"}, {"api_name": "scipy.signal", "line_number": 169, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.concatenate", 
"line_number": 250, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 277, "usage_type": "call"}]} {"seq_id": "174299414", "text": "#from doo.doo import Thread2\nfrom mp.utils import normr,almostEq\nfrom mp.geom import angBetween\nfrom numpy import eye,dot,diag,cross,zeros\nfrom numpy.random import randn\nimport logging; L = logging.Logger(__file__)\nL.setLevel(logging.DEBUG)\nfrom nibabel.quaternions import angle_axis2mat,mat2quat,quat2angle_axis\n\ndef getState(thread):\n \"get position and orientation of ends\"\n xyz = thread.getXYZ()\n s0,s1,e1,e0 = xyz.T[[0,1,-2,-1]]\n return s0,normr(s1-s0),e0,normr(e0-e1)\n \ndef setOrs(thread,pos1,or1,pos2,or2):\n \"set orientation of ends\"\n old_pos1,old_or1,old_pos2,old_or2 = getState(thread)\n thread.applyMotion(pos1-old_pos1,minRot(or1,old_or1),\n pos2-old_pos2,minRot(or2,old_or2))\n\ndef applyTwist(thread,twist1,twist2):\n \"twist the ends\"\n _,or1,_,or2 = getState(thread)\n thread.applyMotion(zeros(3),angle_axis2mat(twist1,or1),zeros(3),angle_axis2mat(twist2,or2))\n \ndef minRot(ax1,ax2):\n \"find the rotation matrix that takes ax1 to ax2\"\n if almostEq(ax1,ax2):\n L.debug(\"minRot: same vector\")\n return eye(3)\n elif almostEq(ax1,-ax2):\n L.debug(\"minRot: opp vector\")\n return -diag([-1,-1,0])\n else:\n ax_rot = cross(ax2,ax1)\n return angle_axis2mat(angBetween(ax1,ax2),ax_rot)\n \ndef infTwist(xyz):\n pts = xyz.T\n ors = normr(pts[1:] - pts[:-1])\n \n start2end_rope = eye(3)\n for (or1,or2) in zip(ors[:-1],ors[1:]):\n start2end_rope = dot(minRot(or2,or1),start2end_rope)\n \n assert almostEq(dot(start2end_rope,ors[0]),ors[-1])\n end2start_min = minRot(ors[0],ors[-1])\n \n twist_mat = dot(end2start_min,start2end_rope)\n ang,_ = quat2angle_axis(mat2quat(twist_mat))\n return ang\n\ndef test_infTwist():\n xyz = randn(3,100)\n infTwist(xyz)\n \ndef test_minRot():\n for _ in xrange(100):\n x1 = normr(randn(3))\n x2 = normr(randn(3))\n assert almostEq(x1,dot(minRot(x1,x2),x2))\n \n\nif __name__ == \"__main__\":\n test_minRot()\n test_infTwist()", "sub_path": "john/mp/simplectrl2.py", "file_name": "simplectrl2.py", "file_ext": "py", "file_size_in_byte": 2019, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.Logger", 
"line_number": 6, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 7, "usage_type": "attribute"}, {"api_name": "mp.utils.normr", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "nibabel.quaternions.angle_axis2mat", "line_number": 25, "usage_type": "call"}, {"api_name": "mp.utils.almostEq", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 31, "usage_type": "call"}, {"api_name": "mp.utils.almostEq", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 36, "usage_type": "call"}, {"api_name": "nibabel.quaternions.angle_axis2mat", "line_number": 37, "usage_type": "call"}, {"api_name": "mp.geom.angBetween", "line_number": 37, "usage_type": "call"}, {"api_name": "mp.utils.normr", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 45, "usage_type": "call"}, {"api_name": "mp.utils.almostEq", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 50, "usage_type": "call"}, {"api_name": "nibabel.quaternions.quat2angle_axis", "line_number": 51, "usage_type": "call"}, {"api_name": "nibabel.quaternions.mat2quat", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 55, "usage_type": "call"}, {"api_name": "mp.utils.normr", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 60, "usage_type": "call"}, {"api_name": "mp.utils.normr", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 61, "usage_type": "call"}, {"api_name": "mp.utils.almostEq", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 62, "usage_type": "call"}]} {"seq_id": "76254094", "text": "from django.contrib import admin\n\n# Register your models here.\nfrom .models import Task\n\n\ndef change_status(modeladmin, request, queryset):\n queryset.update(task_status='Complete')\n\nchange_status.short_description = \"Mark selected tasks as complete\"\n\n\nclass TaskAdmin(admin.ModelAdmin):\n list_display = ('task_name', 'task_status', 'task_priority', 'target_date')\n list_filter = ('task_status', 'task_priority', 'target_date')\n search_fields = ['task_name', 'task_status', 'task_priority']\n # list_editable = ['task_status']\n actions = [change_status]\n\n\nadmin.site.register(Task, TaskAdmin)\n", "sub_path": "fav/todo/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 609, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 13, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Task", "line_number": 21, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 21, "usage_type": "name"}]} {"seq_id": "141133772", "text": "\"\"\"\nREADME\n===============================================================================\nThis is an adapter to represent axographio 
objects as neo objects.\r\n\r\naxographio is a file i/o Python module that can read in axograph \".axgx\" files.\r\nIt is available under a BSD-3-Clause license and can be installed from pip.\r\nThe following file types are supported:\r\n\r\n  - AXGX/AXGD (Axograph X file format)\r\n\r\nBased on stimfitio.py from neo.io\r\n\r\n11 JUL 2018, W. Hart, Swinburne University, Australia\r\n\"\"\"\r\n\r\n# needed for python 3 compatibility\r\nfrom __future__ import absolute_import\r\n\r\nfrom datetime import datetime\r\nimport os\r\nimport sys\r\n\r\nimport numpy as np\r\nimport quantities as pq\r\n\r\nfrom neo.io.baseio import BaseIO\r\nfrom neo.core import Block, Segment, AnalogSignal\r\n\r\ntry:\r\n    import axographio\r\nexcept ImportError as err:\r\n    HAS_AXOGRAPHIO = False\r\n    AXOGRAPHIO_ERR = err\r\nelse:\r\n    HAS_AXOGRAPHIO = True\r\n    AXOGRAPHIO_ERR = None\r\n\r\n\r\nclass AxographIO(BaseIO):\r\n    \"\"\"\r\n    Class for converting an Axographio object to a Neo object.\r\n    Provides a standardized representation of the data as defined by the neo\r\n    project; this is useful to explore the data with an increasing number of\r\n    electrophysiology software tools that rely on the Neo standard.\r\n\r\n    axographio is a file i/o Python module that can read in axograph \".axgx\" files.\r\n    It is available under a BSD-3-Clause license and can be installed from pip.\r\n    The following file types are supported:\r\n\r\n      - AXGX/AXGD (Axograph X file format)\r\n\r\n    Example usage:\r\n        >>> import neo\r\n        >>> neo_obj = neo.io.AxographIO(\"file.axgx\")\r\n        or\r\n        >>> import axographio\r\n        >>> axo_obj = axographio.read(\"file.axgx\")\r\n        >>> neo_obj = neo.io.AxographIO(axo_obj)\r\n    \"\"\"\r\n\r\n    is_readable = True\r\n    is_writable = False\r\n\r\n    supported_objects = [Block, Segment, AnalogSignal]\r\n    readable_objects = [Block]\r\n    writeable_objects = []\r\n\r\n    has_header = False\r\n    is_streameable = False\r\n\r\n    read_params = {Block: []}\r\n    write_params = None\r\n\r\n    name = 'AXOGRAPH'\r\n    extensions = ['axgx', 'axgd']\r\n\r\n    mode = 'file'\r\n\r\n    def __init__(self, filename=None):\r\n        \"\"\"\r\n        Arguments:\r\n            filename : Either a filename or an axographio object\r\n        \"\"\"\r\n        if not HAS_AXOGRAPHIO:\r\n            raise AXOGRAPHIO_ERR\r\n\r\n        BaseIO.__init__(self)\r\n\r\n        if hasattr(filename, 'lower'):\r\n            self.filename = filename\r\n            self.axo_obj = None\r\n        else:\r\n            self.axo_obj = filename\r\n            self.filename = None\r\n\r\n    def read_block(self, **kargs):\r\n        if self.filename is not None:\r\n            self.axo_obj = axographio.read(self.filename)\r\n\r\n        # Build up the block\r\n        blk = Block()\r\n\r\n        blk.rec_datetime = None\r\n        if self.filename is not None:\r\n            # modified time is not ideal but less prone to\r\n            # cross-platform issues than created time (ctime)\r\n            blk.file_datetime = datetime.fromtimestamp(os.path.getmtime(self.filename))\r\n\r\n        # store the filename if it is available\r\n        blk.file_origin = self.filename\r\n\r\n        # determine the channel names and counts\r\n        _, channel_ordering = np.unique(self.axo_obj.names[1:], return_index=True)\r\n        channel_names = np.array(self.axo_obj.names[1:])[np.sort(channel_ordering)]\r\n        channel_count = len(channel_names)\r\n\r\n        # determine the time signal and sample period\r\n        sample_period = self.axo_obj.data[0].step * pq.s\r\n        start_time = self.axo_obj.data[0].start * pq.s\r\n\r\n        # Attempt to read units from the channel names\r\n        channel_unit_names = [x.split()[-1].strip('()') for x in channel_names]\r\n        channel_units = []\r\n\r\n        for unit in channel_unit_names:\r\n            try:\r\n                channel_units.append(pq.Quantity(1, unit))\r\n            except LookupError:\r\n                channel_units.append(None)\r\n\r\n        # Strip units from channel names\r\n        channel_names = [' '.join(x.split()[:-1]) for x in channel_names]\r\n\r\n        # build up segments by grouping axograph columns\r\n        for 
seg_idx in range(1, len(self.axo_obj.data), channel_count):\n seg = Segment(index=seg_idx)\n\n # add in the channels\n for chan_idx in range(0, channel_count):\n signal = pq.Quantity(\n self.axo_obj.data[seg_idx + chan_idx], channel_units[chan_idx])\n analog = AnalogSignal(signal,\n sampling_period=sample_period, t_start=start_time,\n name=channel_names[chan_idx], channel_index=chan_idx)\n seg.analogsignals.append(analog)\n\n blk.segments.append(seg)\n\n blk.create_many_to_one_relationship()\n\n return blk\n", "sub_path": "neo/io/axographio.py", "file_name": "axographio.py", "file_ext": "py", "file_size_in_byte": 4725, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "neo.io.baseio.BaseIO", "line_number": 40, "usage_type": "name"}, {"api_name": "neo.core.Block", "line_number": 65, "usage_type": "name"}, {"api_name": "neo.core.Segment", "line_number": 65, "usage_type": "name"}, {"api_name": "neo.core.AnalogSignal", "line_number": 65, "usage_type": "name"}, {"api_name": "neo.core.Block", "line_number": 66, "usage_type": "name"}, {"api_name": "neo.core.Block", "line_number": 72, "usage_type": "name"}, {"api_name": "neo.io.baseio.BaseIO.__init__", "line_number": 88, "usage_type": "call"}, {"api_name": "neo.io.baseio.BaseIO", "line_number": 88, "usage_type": "name"}, {"api_name": "axographio.read", "line_number": 99, "usage_type": "call"}, {"api_name": "neo.core.Block", "line_number": 102, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 108, "usage_type": "name"}, {"api_name": "os.path.getmtime", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 115, "usage_type": "call"}, {"api_name": "quantities.s", "line_number": 119, "usage_type": "attribute"}, {"api_name": "quantities.s", "line_number": 120, "usage_type": "attribute"}, {"api_name": "quantities.Quantity", "line_number": 128, "usage_type": "call"}, {"api_name": "neo.core.Segment", "line_number": 137, "usage_type": "call"}, {"api_name": "quantities.Quantity", "line_number": 141, "usage_type": "call"}, {"api_name": "neo.core.AnalogSignal", "line_number": 143, "usage_type": "call"}]} {"seq_id": "276807601", "text": "#!/usr/bin/env python3\r\n\r\nVERSION=\"\"\"grrv2_ocu_color.py\r\nuse with grrv2_rover_controller_2017-12-12.py\r\n\"\"\"\r\n\r\nLANC_COMMAND_HELP = \"\"\"\r\nLANC COMMAND HELP\r\n\r\nKEYBOARD COMMANDS\r\n?: this help message\r\nP: PHOTO //Doesn't work\r\nR, r: REC //To confirm if RECORDING, select 'M' for Menu. REC/STBY status is displayed\r\nz: ZOOM_OUT_4\r\nZ: ZOOM_IN_4\r\nf: FOCUS_NEAR //WORKS, IN MANUAL FOCUS MODE ONLY (Set Manual/Auto via menu).\r\nF: FOCUS_FAR //WORKS, IN MANUAL FOCUS MODE ONLY\r\n\r\nCAMCORDER MENU COMMANDS\r\nM, m: MENU\r\n: SELECT / EXECUTE\r\n8: MENU_UP\r\n2: MENU_DOWN\r\n4: MENU_LEFT or PREV;\r\n6: MENU_RIGHT or NEXT;\r\nO: CAMERA POWER ON\r\no: CAMERA POWER OFF\r\nL: VIDEO LIGHTS ON\r\nl: VIDEO LIGHTS OFF\r\n\r\nRAW LANC COMMAND MODE\r\nX, x: rawHexCommandMenu(); Enter header byte, then raw hex number command byte.\r\n\tIf you don't know what this means, don't mess with it. 
:-)\r\n\"\"\"\r\n\r\n\r\nimport pygame\r\nfrom pygame import *\r\n\r\nimport getopt\r\nimport math\r\nimport os\r\nimport string\r\nimport sys\r\n\r\nfrom pygame.locals import *\r\nfrom socket import *\r\nfrom time import *\r\nfrom time import sleep\r\n\r\nimport colorama\r\nfrom colorama import Fore, Back, Style\r\n\r\ncolorama.init()\r\n\r\n# Joystick Structure info:\r\n# 8 floats. Up to 6 real axes and 2 virtual axes (from buttons). joystickx[0][0] to [0][7]\r\n# 13 ints. Up to 12 buttons. joystickx[1][0] to [1][12]. [1][0]\r\n# is ignored since joysticks label 12 buttons 1-12, not 0-11\r\n# 1 string. Joystick Name as pulled from Pygame joystickx[2]\r\n\r\njstick = {\r\n\t\"axes\": [0,0,0,0],\r\n\t\"scale\": [125, -125, 125, -125],\t# Corrects native joystick orientation, motor polarity, etc\r\n\t\"buttons\": [0,] * 13, \t\t\t\t# 13 bytes for 12 buttons: 0 is ignored since joysticks number 1-12 not 0-11\r\n\t\"hat\": [0,0], \t\t\t\t\t\t# joystick hat x and y, -1, 0, 1\r\n}\r\n\r\nclass UDPSocket:\r\n\t\"\"\"UDPSocket tracks a remote and simplifies handling multiple targets\"\"\"\r\n\tdef __init__(self, remoteIP, remotePort):\r\n\t\t\"\"\"Opens a new socket with a default destination of remoteIP, remotePort\r\n\r\n        Args:\r\n            remoteIP (str): Default remote IP.\r\n            remotePort (int): Default remote Port\r\n        \"\"\"\r\n        \r\n\t\tself.remoteIP = remoteIP\r\n\t\tself.remotePort = remotePort\r\n\t\tself.sockobj = socket(AF_INET, SOCK_DGRAM)\r\n\t\tself.sockobj.settimeout(1.0)\r\n\t\tself.last_message = None\r\n\r\n\tdef sendto(self, message, target = None):\r\n\t\t\"\"\"Send a message to target\"\"\"\r\n\t\tmsg = bytes(message)\r\n\t\tif target is None:\r\n\t\t\ttarget = (self.remoteIP, self.remotePort)\r\n\r\n\t\ttry:\r\n\t\t\tself.sockobj.sendto(msg, target)\r\n\t\texcept Exception as e:\r\n\t\t\tprint(e)\r\n\t\t\r\n\t\tif msg != self.last_message:\r\n\t\t\twith open(\"command.log\", \"wb+\") as f:\r\n\t\t\t\tf.write(msg)\r\n\t\t\tself.last_message = msg\r\n\r\n\r\n\tdef recvfrom(self, size):\r\n\t\t\"\"\"receives a message\r\n\t\t\r\n\t\tReturns: (data, remote_address)\r\n\t\t\"\"\"\r\n\t\tdata, remote_addr = self.sockobj.recvfrom(size)\r\n\t\tself.last_remote_addr = remote_addr\r\n\t\treturn data, remote_addr\r\n\r\n\r\ndef initalize_joysticks():\r\n\t\"\"\"Walks through the list of joysticks and ensures Pygame has initialized them\"\"\"\r\n\tgrab_input()\r\n\r\n\tfor x in range(joystick.get_count()): \t\t\t# inits however many joysticks are attached, usually 1 or 2\r\n\t\tj = joystick.Joystick(x) \t\t\t\t\t# Logitech Dual Action is 1 joystick w/ 2 sticks with 1 XY axis pair (4 axes).\r\n\t\tj.init() \t\t\t\t# Most elaborate joysticks are 1 joystick 1 stick with 2 axes (x, y)\r\n\t\ttxt = 'Enable Joystick: ' + j.get_name()\r\n\t\t# jstick['name'] = j[x].get_name()\r\n\t\tprint(txt)\r\n\r\n\trelease_input()\r\n\r\n\r\ndef setup_window():\r\n\t\"\"\"sets the x,y position of the window and required video mode flags\r\n\twe don't actually need to run the window to run the program\r\n\t\"\"\"\r\n\tos.environ['SDL_VIDEO_WINDOW_POS'] = \"%d, %d\" % (1600, 40)\r\n\tvideo_flags = OPENGL|DOUBLEBUF \r\n\tdisplay.set_mode((128,96), video_flags)\r\n\r\n\r\ndef main():\r\n\tprint(VERSION)\r\n\r\n\t# pygame initialization\r\n\tsetup_window()\r\n\tpygame.init()\r\n\tclock, ticks = pygame.time.Clock(), pygame.time.get_ticks()\r\n\tinitalize_joysticks()\r\n\r\n\t# a socket to talk to the rover\r\n\trover_socket = UDPSocket('192.168.1.99', 60615)\r\n\r\n\tprint('Starting' + \"\\n\"*4)\r\n\r\n\tpilt_upcounter = 
0\r\n\tpilt_downcounter = 255\r\n\tloops=1\r\n\r\n\tmotor_enable_flag = 0\r\n\r\n\tlast_keydown = -1\r\n\tkeyboard_keys = [0,0]\r\n\r\n\twhile True:\r\n\t\tclock.tick(300) # clock.tick(x) x = Hertz of control loop. 100=100Hz, 20 = 20 Hz Works\r\n\r\n\t\tfor e in event.get():\r\n\t\t\t# print e.type,' ',\r\n\r\n\t\t\tif e.type == pygame.JOYBUTTONDOWN:\r\n\t\t\t\tbutton = e.dict['button'] + 1\r\n\t\t\t\t# print(\"button down\")\r\n\t\t\t\tjstick['buttons'][0] = button\r\n\t\t\t\tif button == 9:\r\n\t\t\t\t\tgrab_input()\r\n\t\t\t\tif button == 10:\r\n\t\t\t\t\trelease_input()\r\n\t\t\t\t# print button, ' ',jstick['buttons'][button]\r\n\r\n\t\t\tif e.type == pygame.JOYBUTTONUP:\r\n\t\t\t\t# print(\"button up\")\r\n\t\t\t\tbutton = e.dict['button'] + 1\r\n\t\t\t\tjstick['buttons'][0] = 0\r\n\t\t\t\t# print button, ' ',jstick['buttons'][button]\r\n\r\n\t\t\tif e.type == pygame.KEYDOWN:\r\n\t\t\t\t# print(e)\r\n\t\t\t\tkey, mod, uni = e.dict['key'], e.dict['mod'], e.unicode\r\n\t\t\t\tif len(uni) > 0:\r\n\t\t\t\t\tuni = ord(uni)\r\n\t\t\t\telse:\r\n\t\t\t\t\tuni = 0\r\n\t\t\t\t# key = e.dict['key'] - 32 * e.dict['mod'] # mod = 1 is shift\r\n\r\n\t\t\t\t# check for control keys\r\n\t\t\t\tif mod & pygame.KMOD_CTRL:\r\n\t\t\t\t\tif key == pygame.K_l:\t\t# lock mouse and keyboard to game window\r\n\t\t\t\t\t\tgrab_input()\r\n\r\n\t\t\t\t\tif key in (pygame.K_c, pygame.K_d):\r\n\t\t\t\t\t\tend_program()\r\n\r\n\t\t\t\tif key == pygame.K_ESCAPE:\t\t# release mouse and keyboard from game window\r\n\t\t\t\t\trelease_input()\r\n\r\n\t\t\t\tif key == pygame.K_SPACE: \t\t# ALL STOP SPACE BAR (Big, easy to hit in a panic for faux E-STOP)\r\n\t\t\t\t\tmotor_enable_flag = 0\r\n\r\n\t\t\t\tif key == pygame.K_e and mod & pygame.KMOD_SHIFT: # 'E', ENABLE MOTORS\r\n\t\t\t\t\tmotor_enable_flag = 1\r\n\r\n\t\t\t\t# map arrows, pygame's 273..276 (up, down, right, left) => 128..131\r\n\t\t\t\tif key in (pygame.K_LEFT, pygame.K_RIGHT, pygame.K_UP, pygame.K_DOWN):\r\n\t\t\t\t\tmap = {pygame.K_LEFT:131, pygame.K_RIGHT:130, pygame.K_UP:128, pygame.K_DOWN:129}\r\n\t\t\t\t\tkeyboard_keys[0] = map[key]\r\n\r\n\t\t\t\tif key == 47:\r\n\t\t\t\t\tprint(LANC_COMMAND_HELP)\r\n\r\n\t\t\t\t# Only send non-continuous commands once\r\n\t\t\t\tcontinuous_key_commands = ('f','F','z','Z')\r\n\t\t\t\tif uni not in continuous_key_commands and uni == last_keydown:\r\n\t\t\t\t\tuni = -1\r\n\t\t\t\t\tkeyboard_keys[0] = 0\r\n\r\n\t\t\t\t# The above, including local commands & esc, space, and E all fall through to here and will be sent to the remote. Intentional?\r\n\t\t\t\tif uni >= 0 and uni < 256:\r\n\t\t\t\t\tkeyboard_keys[0] = uni\r\n\r\n\t\t\tif e.type == pygame.KEYUP:\r\n\t\t\t\tkey, mod = e.dict['key'], e.dict['mod']\r\n\r\n\t\t\t\tif key >= 0 and key < 256:\r\n\t\t\t\t\tkeyboard_keys[0] = 0\r\n\r\n\t\t\t\tif key in (pygame.K_LEFT, pygame.K_RIGHT, pygame.K_UP, pygame.K_DOWN):\r\n\t\t\t\t\tkeyboard_keys[0] = 0\r\n\t\t\t\t\r\n\t\t\t\tlast_keydown = -1\r\n\r\n\t\t\tif e.type == pygame.JOYAXISMOTION:\r\n\t\t\t\taxis = e.dict['axis']\r\n\t\t\t\t# print(\"joy motion\")\r\n\t\t\t\tjstick['axes'][axis] = int(e.dict['value'] * jstick['scale'][axis]) # -1.0 to 0 to 1.0 * +/-125\r\n\r\n\t\t\tif e.type == pygame.JOYHATMOTION: # HAT MOTION Use hat +/-y for LANC zoom? 
And +/-x for focus?\r\n\t\t\t\t# print(\"joy hat motion\")\r\n\t\t\t\tjstick['hat'] = e.dict['value'] # 0,1 are x,y\r\n\t\t\t\t# print(\"\\njstick hat: \", jstick['hat'])\r\n\t\t\t\t# since all LANC commands must be sent down the serial LANC wire\r\n\t\t\t\t# it's best to treat the Hat as keystrokes so they never compete\r\n\t\t\t\t# and the keystroke SwitchCase can prioritize them\r\n\r\n\t\t\t\tfocus_commands = { 1: ord('F'), -1: ord('f') }\r\n\t\t\t\tzoom_commands = { 1: ord('Z'), -1: ord('z') }\r\n\r\n\t\t\t\t# look it up in the dictionary. If it's not there, use '0'\r\n\t\t\t\tkey = focus_commands.get(jstick['hat'][0], 0)\r\n\t\t\t\t# look up zoom state, if any, and allow that to override focus\r\n\t\t\t\tkey = zoom_commands.get(jstick['hat'][1], key)\r\n\t\t\t\t# print('jstick hat to key', key)\r\n\t\t\t\tkeyboard_keys[0] = key\r\n\r\n\t\t\t# if e.type == pygame.JOYBALLMOTION:\r\n\t\t\t# \tpass\r\n\t\t\t# if e.type in (pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP, pygame.MOUSEMOTION):\r\n\t\t\t# \tpass\r\n\r\n\r\n\t\t# send the joystick and keyboard commands to the rover\r\n\t\tmotions = stick_to_motions(jstick['axes'])\r\n\t\t# print(motions, \"\\n\")\r\n\t\tmsg = build_rover_command_byte_packet(motions, joystick, keyboard_keys, motor_enable_flag)\r\n\t\tprint('cmd_chr', *msg, end = ' ')\r\n\t\trover_socket.sendto(msg)\r\n\r\n\t\t# RECEIVE DATA FROM ROBOT\r\n\t\ttry:\r\n\t\t\trover_packet, remote_address = rover_socket.recvfrom(1024)\r\n\t\t\trover_status, pilt_status, rover_message = unpack_rover_message(rover_packet)\r\n\r\n\t\t\tpilt_upcounter = (pilt_upcounter + 1) % 255\r\n\t\t\tpilt_downcounter = 255 - pilt_upcounter\r\n\r\n\t\t\tprint_rover_pilt_status(rover_status, pilt_status, rover_message, pilt_upcounter, pilt_downcounter, loops)\r\n\t\texcept timeout:\r\n\t\t\tprint('NO ROVER STATUS PACKET RECEIVED. Last known ', end=' ')\r\n\r\n\t\tprint(\" \\r\", end=' ')\r\n\t\tsys.stdout.flush()\r\n\r\n\t\tloops = loops + 1\r\n\t\tif loops == 101:\r\n\t\t\tloops = 1\r\n\r\ndef pilt_indicator_hue(x):\r\n\t\"\"\"return a color format for printing based on the range of the value\"\"\"\r\n\r\n\t# Well within the limits of travel of pan and tilt\r\n\tif 96 <= x <= 160:\r\n\t\treturn Style.BRIGHT + Fore.GREEN \r\n\r\n\t# Approaching the limits of travel of pan and tilt\r\n\tif 64 <= x <= 192:\r\n\t\treturn Style.BRIGHT + Fore.YELLOW\r\n\r\n\t# AT OR EXCEEDING the limits of travel of pan and tilt\r\n\treturn Style.BRIGHT + Fore.RED\r\n\t\r\ndef pilt_indicator(x):\r\n\t\"\"\"Convert a PILT reading to a colorized string\"\"\"\r\n\treturn pilt_indicator_hue(x) + str(x) + Style.RESET_ALL\r\n\r\ndef print_rover_pilt_status(rover_status, pilt_status, rover_message, pilt_upcounter, pilt_downcounter, loops):\r\n\t\"\"\"Prints one line corresponding to the rover status. 
Multiple calls will overwrite previous lines.\"\"\"\r\n\tprint(' rover_status', *rover_status, end=' ')\r\n\r\n\tprint(' pilt_status', end=' ')\r\n\r\n\tfor i in range(min(14, len(pilt_status))):\r\n\t\tif i==2:\r\n\t\t\tprint(pilt_indicator(pilt_upcounter), end = ' ')\r\n\t\telif i==5:\r\n\t\t\tprint(pilt_indicator(pilt_downcounter), end = ' ')\r\n\t\telse:\r\n\t\t\tprint(pilt_status[i], end=' ')\r\n\r\n\tprint(' %s %d \\r' % (rover_message, loops), end=' ')\r\n\r\n\tsys.stdout.flush()\r\n\r\ndef build_rover_command_byte_packet(motions, joystick, keyboard_keys, motor_enable_flag):\r\n\t\"\"\"Builds a rover command message from parameters\"\"\"\r\n\r\n\tdef wire_signed(x):\r\n\t\t\"\"\"Converts (-1,1) to (0,1) for PILT\"\"\"\r\n\t\tif x == -1:\r\n\t\t\treturn 0\r\n\t\treturn 1\r\n\r\n\tmsg = [\r\n\t\tmotions['pwm_left'],\t\t\t\t\t\t# Left Stick (X-Y mixed): Left PWM abs value\r\n\t\twire_signed(motions['sign_left']),\t\t\t# Left Stick (X-Y mixed): Left PWM boolean sign map(1=1, -1=0)\r\n\t\tmotions['pwm_right'],\t\t\t\t\t\t# Left Stick (X-Y mixed): Right PWM abs value\r\n\t\twire_signed(motions['sign_right']),\t\t\t# Left Stick (X-Y mixed): Right PWM boolean sign map(1=1, -1=0)\r\n\t\tabs(motions['stick_pan']),\t\t\t\t\t# Right Stick,PWM, Pan\r\n\t\twire_signed(sign(motions['stick_pan'])),\t# Right Stick,DIR, Pan\r\n\t\tabs(motions['stick_tilt']),\t\t\t\t\t# Right Stick,PWM, Tilt\r\n\t\twire_signed(sign(motions['stick_tilt'])),\t# Right Stick,DIR, Pan\r\n\t\tjstick['buttons'][0],\r\n\t\tkeyboard_keys[0], \t\t\t\t# commands for lanc, lights, whatever to Rover\r\n\t\tjstick['hat'][0] + 1,\t \t\t# JS Hat values are -1, 0, +1.\r\n\t\tjstick['hat'][1] + 1, \t\t\t# be unsigned: like joystick axes, it must be converted\r\n\t\tmotor_enable_flag,\r\n\t\t0, \t\t\t\t\t\t\t\t# Spare\r\n\t\t0, \t\t\t\t\t\t\t\t# Spare\r\n\t\t0, \t\t\t\t\t\t\t\t# Spare\r\n\t] + [45]\r\n\t# [45] is an end of message marker\r\n\r\n\tassert len(msg) == 17\r\n\treturn msg\r\n\r\ndef unpack_rover_message(rover_packet):\r\n\t\"\"\"break apart the received packet and convert from array of bytes to native python types\r\n\tReturns:\r\n\t\trover_status (:obj:`list` of :obj:`int`))\r\n\t\tpilt_status (:obj:`list` of :obj:`int`))\r\n\t\trover_message (str)\r\n\t\"\"\"\r\n\t\r\n\trover_status = [x for x in rover_packet[0:16]]\t# 16 bytes rover\r\n\tpilt_status = [x for x in rover_packet[16:30]]\t# 14 bytes Arduino status\r\n\trover_message = rover_packet[32:48]\t\t\t\t\t\t# Arduino puts.\r\n\tif not isinstance(rover_message, str):\r\n\t\t# In Python 3 strings are no longer equivalent to byte arrays, so go through a decode step\r\n\t\trover_message = rover_message.decode('ascii')\r\n\treturn rover_status, pilt_status, rover_message\r\n\r\n\r\ndef stick_to_motions(joystick):\r\n\t\"\"\"Translates joystick to PWMs, signs, octant, pan, and tilt\"\"\"\r\n\tpwm_left, pwm_right, sign_left, sign_right, octant = map_x_y_to_left_right_pwms(jstick['axes'])\r\n\tstick_pan = jstick['axes'][2]\r\n\tstick_tilt = jstick['axes'][3]\r\n\tmotions = {\r\n\t\t\"pwm_left\": pwm_left,\r\n\t\t\"pwm_right\": pwm_right,\r\n\t\t\"sign_left\": sign_left,\r\n\t\t\"sign_right\": sign_right,\r\n\t\t\"stick_pan\": stick_pan,\r\n\t\t\"stick_tilt\": stick_tilt,\r\n\t\t\"octant\": octant,\r\n\t}\r\n\treturn motions\r\n\r\n\r\ndef get_octant(joystick):\r\n\t\"\"\"get_octant() handles left gamepad joystick mixing for the tank skid-steering\r\n\tAs normal cartesian grid has 4 quadrants where X and Y are + /-, we need 8 divisions\r\n\tSo 'octants'. 
Based on 3 conditions:\r\n\t\tabs_x => abs_y; sgn_x >0; sgn_y >0. from these are derived the left and right\r\n\t\t'tank track' pwm mixing for single stick skid steer used in map_x_y_to_left_right()\r\n\t\"\"\"\r\n\r\n\tstick_x = jstick['axes'][0]\r\n\tstick_y = jstick['axes'][1]\r\n\r\n\toctant = 0\r\n\tif abs(stick_y) >= abs(stick_x):\r\n\t\toctant += 4\r\n\r\n\tif stick_y < 0:\r\n\t\toctant += 2\r\n\r\n\tif stick_x < 0:\r\n\t\toctant += 1\r\n\r\n\treturn octant\r\n\r\n\r\ndef map_x_y_to_left_right_pwms(joystick):\r\n\t\"\"\"basically, left joystick positions within XY octants defined as:\r\n\tstick fwd, left right tracks fwd: back = bkwd\r\n\tstick 90 left or right = pivot lft or rt (one track fwd opposite track rev)\r\n\tstick 45 deg = soft turn; one track moving, other track stopped.\r\n\tintermediate angles smooth continuum of translate rotated between the above\r\n\t\"\"\"\r\n\r\n\toctant = get_octant(jstick['axes'])\r\n\r\n\tstick_x, stick_y = jstick['axes'][0], jstick['axes'][1]\r\n\tabs_x, abs_y = abs(stick_x), abs(stick_y)\r\n\tsign_x, sign_y = sign(stick_x), sign(stick_y)\r\n\r\n\tif octant in (0, 3):\r\n\t\tpwm_left = abs_y\r\n\t\tpwm_right = abs_y - abs_x\r\n\t\tsign_left = sign_y\r\n\t\tsign_right = sign_y\r\n\r\n\tif octant in (1, 2):\r\n\t\tpwm_left = abs_y - abs_x\r\n\t\tpwm_right = abs_y\r\n\t\tsign_left = sign_y\r\n\t\tsign_right = sign_y\r\n\r\n\tif octant in (4, 7):\r\n\t\tpwm_left = abs_x\r\n\t\tpwm_right = abs_x - abs_y\r\n\t\tsign_left = sign_x\r\n\t\tsign_right = -sign_x\r\n\r\n\tif octant in (5, 6):\r\n\t\tpwm_left = abs_x - abs_y\r\n\t\tpwm_right = abs_x\r\n\t\tsign_left = sign_x\r\n\t\tsign_right = -sign_x\r\n\r\n\tdead_boundary = 12\r\n\r\n\tif pwm_left < dead_boundary:\r\n\t\tpwm_left = 0\r\n\tif pwm_right < dead_boundary:\r\n\t\tpwm_right = 0\r\n\r\n\treturn (pwm_left, pwm_right, sign_left, sign_right, octant)\r\n\r\n\r\ndef sign(number):\r\n\t\"\"\"Returns 1 if the number >= 0, otherwise -1\"\"\"\r\n\tif number >= 0:\r\n\t\treturn 1\r\n\treturn -1\r\n\r\ndef grab_input():\r\n\t\"\"\"Lock mouse and keyboard to window\"\"\"\r\n\tevent.set_grab(True)\r\n\r\ndef release_input():\r\n\t\"\"\"Release mouse and keyboard from being locked to window\"\"\"\r\n\tevent.set_grab(False)\r\n\r\ndef end_program():\r\n\trelease_input()\r\n\tprint(\"ALL STOP\")\r\n\tprint(\"EXITING OCU PROGRAM\")\r\n\tsys.exit(0) # exits program\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n\r\n", "sub_path": "ocu-color-py3-2019-01-30.py", "file_name": "ocu-color-py3-2019-01-30.py", "file_ext": "py", "file_size_in_byte": 14697, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "colorama.init", "line_number": 54, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 140, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 141, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 141, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 141, "usage_type": "call"}, {"api_name": "pygame.JOYBUTTONDOWN", "line_number": 164, "usage_type": "attribute"}, {"api_name": "pygame.JOYBUTTONUP", "line_number": 174, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 180, "usage_type": "attribute"}, {"api_name": "pygame.KMOD_CTRL", "line_number": 190, "usage_type": "attribute"}, {"api_name": "pygame.K_l", "line_number": 191, "usage_type": "attribute"}, {"api_name": 
"pygame.K_c", "line_number": 194, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 194, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 197, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 200, "usage_type": "attribute"}, {"api_name": "pygame.K_e", "line_number": 203, "usage_type": "attribute"}, {"api_name": "pygame.KMOD_SHIFT", "line_number": 203, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 207, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 207, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 207, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 207, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 208, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 208, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 208, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 208, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 224, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 230, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 230, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 230, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 230, "usage_type": "attribute"}, {"api_name": "pygame.JOYAXISMOTION", "line_number": 235, "usage_type": "attribute"}, {"api_name": "pygame.JOYHATMOTION", "line_number": 240, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 284, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 284, "usage_type": "attribute"}, {"api_name": "colorama.Style.BRIGHT", "line_number": 295, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 295, "usage_type": "name"}, {"api_name": "colorama.Fore.GREEN", "line_number": 295, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 295, "usage_type": "name"}, {"api_name": "colorama.Style.BRIGHT", "line_number": 299, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 299, "usage_type": "name"}, {"api_name": "colorama.Fore.YELLOW", "line_number": 299, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 299, "usage_type": "name"}, {"api_name": "colorama.Style.BRIGHT", "line_number": 302, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 302, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 302, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 302, "usage_type": "name"}, {"api_name": "colorama.Style.RESET_ALL", "line_number": 306, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 306, "usage_type": "name"}, {"api_name": "sys.stdout.flush", "line_number": 324, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 324, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 482, "usage_type": "call"}]} {"seq_id": "153570739", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 12 14:07:57 2020\n\n@author: USER\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\nimport re\n\nlink = 'https://hh.ru'\ntext = 'ะธะฝะถะตะฝะตั€ ัะปะตะบั‚ั€ะธะบ'\narea_num = 2\n\ndef page_data(soup):\n #dataframe\n vacancies = pd.DataFrame()\n #ะฑะปะพะบ ัะพ ะฒัะตะผะธ ะฒะฐะบะฐะฝัะธัะผะธ ะปะธัั‚ะฐ\n vac_block = 
def page_data(soup):\n    # accumulate one row per vacancy\n    vacancies = pd.DataFrame()\n    # block containing all vacancies on the page\n    vac_block = soup.find('div', {'class':'vacancy-serp'})\n    # list of vacancy cards\n    vac_list = vac_block.find_all('div', {'class':'vacancy-serp-item'}, recursive=False)\n    # data for each vacancy\n    for vac in vac_list:\n        vac_dict = {}\n        vac_dict['name'] = vac.find('a', {'data-qa':'vacancy-serp__vacancy-title'}).text\n        vac_dict['salary'] = vac.find('div', {'class':'vacancy-serp-item__sidebar'}).text\n        if '-' in vac_dict['salary']:\n            res = re.search(r'([\\d ]+\\D*[\\d ]*)-([\\d ]+\\D*[\\d ]*)',vac_dict['salary'])\n            vac_dict['min_salary'] = res.group(1)\n            vac_dict['max_salary'] = res.group(2)\n        elif 'от' in vac_dict['salary']:  # 'от' = \"from\"\n            res = re.search(r'([\\d]+\\D*[\\d]*) ',vac_dict['salary'])\n            vac_dict['min_salary'] = res.group(1)\n            \n        elif 'до' in vac_dict['salary']:  # 'до' = \"up to\"\n            res = re.search(r'([\\d]+\\D*[\\d]*) ',vac_dict['salary'])\n            vac_dict['max_salary'] = res.group(1)\n            \n        else:\n            pass\n        if vac_dict['salary']:\n            vac_dict['currency'] = re.search(r' ([\\w\\.]+$)',vac_dict['salary']).group(1)\n        else:\n            pass\n        vac_dict['link'] = vac.find('a', {'data-qa':'vacancy-serp__vacancy-title'})['href']\n        if vac.find('a', {'data-qa':'vacancy-serp__vacancy-employer'}):\n            vac_dict['company'] = vac.find('a', {'data-qa':'vacancy-serp__vacancy-employer'}).text\n        else:\n            vac_dict['company']='not_found'\n        vac_dict['city'] = vac.find('span', {'data-qa':'vacancy-serp__vacancy-address'}).text\n        vac_dict['source'] = 'hh.ru'\n        vacancies = vacancies.append(vac_dict, ignore_index = True)\n    return(vacancies)\n\n\ndef scrap_hh(link, text, area_num):\n    area = area_num\n    vacancy_name = text\n    url = link\n    params = {'area':area,\n              'st':'searchVacancy',\n              'text': vacancy_name,\n              'fromSearch':'true'}\n    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'}\n    rq = requests.get(url+'/search/vacancy', params=params, headers=headers)\n    soup = bs(rq.text, 'lxml')\n    result = pd.DataFrame()\n    result = page_data(soup)\n    print('page 1 added')\n    pager_block = soup.find('div', {'data-qa':'pager-block'})\n    next_page_tail = pager_block.find('a', {'data-qa':'pager-next'})\n    i = 2\n    while next_page_tail:\n        soup = bs(requests.get(url+next_page_tail['href'], headers = headers).text, 'lxml')\n        result = result.append(page_data(soup), ignore_index = True)\n        pager_block = soup.find('div', {'data-qa':'pager-block'})\n        next_page_tail = pager_block.find('a', {'data-qa':'pager-next'})\n        print(f'page {i} added')\n        i = i + 1\n    return(result)\n\n\ndf = scrap_hh(link, text, area_num).loc[:, ['name','city', 'min_salary', 'max_salary', 'currency', 'link', 'source']]\n\nprint(df)", "sub_path": "hw2/hh_scrapper.py", "file_name": "hh_scrapper.py", "file_ext": "py", "file_size_in_byte": 3448, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pandas.DataFrame", "line_number": 19, "usage_type": "call"}, {"api_name": "re.search", "line_number": 30, "usage_type": "call"}, {"api_name": "re.search", "line_number": 34, "usage_type": "call"}, {"api_name": "re.search", "line_number": 38, "usage_type": "call"}, {"api_name": "re.search", "line_number": 44, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 68, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 70, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", 
"line_number": 77, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 77, "usage_type": "call"}]} {"seq_id": "244892668", "text": "#!/usr/bin/python3\nfrom PyQt5.QtCore import Qt,QTimer,QDateTime,QDate,QTime\nfrom PyQt5.QtWidgets import (QApplication, QGridLayout, QLabel, QLineEdit, QMessageBox, QPushButton, QTextEdit, QWidget)\nfrom PyQt5.QtGui import QPixmap\nimport math,sys,datetime\n\nclass Main_window(QWidget):\n\tdef set_time(self):\n\t\tself.czas = datetime.datetime(2015,2,3,14,2)\n\t\tself.teraz = datetime.datetime.now()\n\t\tself.pozostaly=self.czas - self.teraz\n\t\tself.time_label.setText(\"Pozostaล‚o: \"+str(self.pozostaly))\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.setWindowTitle(\"Zegar ลšmierci\")\n\t\tself.layout=QGridLayout()\n\t\tself.time_label=QLabel()\n\t\tself.layout.addWidget(self.time_label)\n\t\tself.setLayout(self.layout)\n\t\tself.timer=QTimer()\n\t\tself.timer.timeout.connect(self.set_time)\n\t\tself.timer.start(0)\n\t\tself.obrazek = QPixmap(\"Mentzen.png\")\n\t\tself.mietek = QLabel()\n\t\tself.mietek.setPixmap(self.obrazek)\n\t\tself.layout.addWidget(self.mietek)\n\t\t\n\t\t\n\n\napp = QApplication(sys.argv)\nwindow = Main_window()\nwindow.show()\nsys.exit(app.exec())\n", "sub_path": "Pythony/PyQt5_Mietek/megazegar.py", "file_name": "megazegar.py", "file_ext": "py", "file_size_in_byte": 1024, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 7, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 10, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 17, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 21, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 24, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 25, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 32, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 35, "usage_type": "call"}]} {"seq_id": "504784757", "text": "#===== type hints ===========\nfrom typing import *\n\nx1 = 10 # type: int\n# x2 = 10 # type: str\n\nx3: int = 15\n\ndef my_plus(x: int, y: Optional[int]=None) -> int:\n if y is None:\n y = 88\n return x + y\n\nprint(my_plus(1, 2))\nprint(my_plus(2))\n# print(my_plus(\"hello\", \"world\"))\n\ndef g(x: Sequence[int]) -> None:\n print(len(x))\n print(x[1])\n for i in x:\n print(i)\n print()\n\ng([1,2,3,4])\ng((1,2,3,4))\n# g('ssjlsjisjsij')\n\n#===== defaultdict =====\nfrom collections import defaultdict\nfrom pprint import pprint\nnames = '''bob alice anna abba bot cat caro candy dom doc dog dot ab'''.split()\nd = defaultdict(list)\nfor name in names:\n feature = name[0]\n d[feature].append(name)\n\npprint(d, depth=40)\n\npprint(sorted(names, key=len))", "sub_path": "ModernPython/c3.py", "file_name": "c3.py", "file_ext": "py", "file_size_in_byte": 758, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "collections.defaultdict", "line_number": 33, "usage_type": "call"}, {"api_name": "pprint.pprint", 
"line_number": 38, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 40, "usage_type": "call"}]} {"seq_id": "379492423", "text": "import numpy as np\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nfrom _Network.base_module import Network_Opts_base, Network_base,\\\n disp_to_depth, ResnetEncoder_fromV2,\\\n DepthDecoder\n \nclass Network_Opts(Network_Opts_base):\n def __init__(self):\n super().__init__()\n self.max_depth = 100\n self.min_depth = 0.1\n self.backbone = 18\n self.sample_mode = \"nearest\"\n\n self.use_depth = True\n\n\nclass BaseMonoDepthEstNetwork(Network_base):\n \"\"\"The following parameters should be included in opts:\n min_depth, max_depth: min and max depth in predict\n \"\"\"\n def __init__(self, opts, *args, **kargs):\n super().__init__(*args, **kargs)\n self.opts = opts\n\n self.networks[\"Encoder\"] = ResnetEncoder_fromV2(self.opts.backbone,\n True)\n if self.opts.backbone > 34:\n num_ch_enc = [64, 256, 512, 1024, 2048]\n else:\n num_ch_enc = [64, 64, 128, 256, 512]\n\n self.networks[\"Decoder\"] = DepthDecoder(num_ch_enc,\n num_output_channels=1)\n\n self.all_net = nn.ModuleList(v for k, v in self.networks.items())\n\n if self.logger.is_train: \n self.logger.make_logdir(\"Disp\") \n\n def forward(self, inputs, stage):\n image = inputs[(\"color_aug\", \"l\")]\n features = self.networks[\"Encoder\"](image)\n outputs = self.networks[\"Decoder\"](features)\n\n if not self.opts.min_depth:\n K = inputs[\"K\"]\n T = inputs[\"stereo_T\"]\n w = image.size()[3]\n self.compute_min_depth(K, T, w)\n\n # compute depth from disp\n for scale in range(3, -1, -1):\n disp = outputs[(\"disp\", scale)]\n if self.opts.use_depth:\n _, depth = disp_to_depth(disp, self.opts.min_depth,\n self.opts.max_depth)\n else:\n fx = inputs[\"K\"][0, 0, 0]\n img_width = image.size()[-1]\n depth = fx/(img_width * 0.3 * disp + 1e-10)\n outputs[(\"depth\", scale)] = depth\n\n if stage.is_visual and self.logger.is_train:\n self.logger.log_print(self.logger.step)\n v_dict = OrderedDict()\n v_modes = []\n v_size = [[\"img\"]]\n\n v_dict[\"img\"] = image.detach()\n v_modes.append(\"img\")\n for scale in range(0, 4):\n show_disp = outputs[(\"disp\", scale)].detach()\n self.logger.log_print(show_disp.mean())\n v_dict[\"disp_{}\".format(scale)] = show_disp\n v_modes.append(\"disp\")\n v_size.append([\"disp_{}\".format(scale)])\n\n self.logger.do_visualizion(\"Disp\", v_dict, v_modes, v_size, \"disp\")\n self.logger.log_print(\"==========\")\n\n return outputs\n\n def check_info(self):\n assert self.opts.height % 32 == 0, \"'height' must be a multiple of 32\"\n assert self.opts.width % 32 == 0, \"'width' must be a multiple of 32\"\n\n def get_trainable_params(self):\n names = [\"Encoder\", \"Decoder\"]\n muls = [1, 1]\n return self.get_module_params(names, muls)\n\n def compute_min_depth(self, K, T, w):\n fx = K[0, 0, 0]\n baseline = torch.abs(T[0, 0, 3])\n target_d = fx * baseline / (0.3 * w)\n disp_min = 1 / self.opts.max_depth\n disp_max = (1 - disp_min * 0.5 * target_d) / (0.5 * target_d)\n disp_max = disp_max.data\n self.opts.min_depth = 1 / disp_max \n print(self.opts.min_depth)\n", "sub_path": "_Network/monov2.py", "file_name": "monov2.py", "file_ext": "py", "file_size_in_byte": 3761, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "_Network.base_module.Network_Opts_base", "line_number": 13, "usage_type": "name"}, {"api_name": 
"_Network.base_module.Network_base", "line_number": 24, "usage_type": "name"}, {"api_name": "_Network.base_module.ResnetEncoder_fromV2", "line_number": 32, "usage_type": "call"}, {"api_name": "_Network.base_module.DepthDecoder", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn.ModuleList", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "_Network.base_module.disp_to_depth", "line_number": 62, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 101, "usage_type": "call"}]} {"seq_id": "223099100", "text": "from collections import OrderedDict\n\nfrom malcolm.core.loggable import Loggable\n\n\nclass Block(Loggable):\n \"\"\"Object consisting of a number of Attributes and Methods\"\"\"\n\n def __init__(self, name):\n \"\"\"\n Args:\n name (str): Block name e.g. \"BL18I:ZEBRA1\"\n \"\"\"\n super(Block, self).__init__(logger_name=name)\n self.name = name\n self._methods = OrderedDict()\n self._attributes = OrderedDict()\n\n def add_attribute(self, attribute):\n \"\"\"Add an Attribute to the block and set the block as its parent\"\"\"\n\n assert attribute.name not in self._attributes, \\\n \"Attribute %s already defined for Block %s\" \\\n % (attribute.name, self.name)\n self._attributes[attribute.name] = attribute\n attribute.set_parent(self)\n setattr(self, attribute.name, attribute)\n\n def add_method(self, method):\n \"\"\"Add a Method to the Block\n\n Args:\n method (Method): The Method object that has already been filled in\n \"\"\"\n assert method.name not in self._methods, \\\n \"Method %s already defined for Block %s\" % (method.name, self.name)\n self._methods[method.name] = method\n setattr(self, method.name, method)\n\n def handle_request(self, request):\n \"\"\"\n Process the request depending on the type\n\n Args:\n request(Request): Request object specifying action\n \"\"\"\n self.log_debug(\"Received request %s\", request)\n if request.type_ == request.POST:\n method_name = request.endpoint[-1]\n self._methods[method_name].handle_request(request)\n else:\n layer = self\n for next_link in request.endpoint[1:]:\n layer = getattr(layer, next_link)\n\n if hasattr(layer, \"to_dict\"):\n request.respond_with_return(layer.to_dict())\n else:\n request.respond_with_return(layer)\n\n def to_dict(self):\n \"\"\"Convert object attributes into a dictionary\"\"\"\n\n d = OrderedDict()\n\n for attribute_name, attribute in self._attributes.items():\n d[attribute_name] = attribute.to_dict()\n for method_name, method in self._methods.items():\n d[method_name] = method.to_dict()\n\n return d\n", "sub_path": "malcolm/core/block.py", "file_name": "block.py", "file_ext": "py", "file_size_in_byte": 2324, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "malcolm.core.loggable.Loggable", "line_number": 6, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 16, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 17, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 64, "usage_type": "call"}]} {"seq_id": "399286349", "text": "\"\"\"\nChart objects and methods for displaying data.\n\"\"\"\n#pylint: disable=too-many-arguments,too-many-instance-attributes,protected-access\nfrom collections import OrderedDict\nfrom datetime import datetime, timedelta\n\nfrom django.template import 
loader, Context\n\nfrom dancohenio.utils import avg\nfrom .colors import rgba\n\n\nclass Chart(object):\n    \"\"\"\n    Basically a task, but not reflected by a database construct, which\n    allows us to duplicate without altering data or making calls to the\n    db.\n    \"\"\"\n    def __init__(self, task, title=None, width=400, height=200, fill=True):\n        \"\"\"Creates a new chart instance.\"\"\"\n        self.width = width\n        self.height = height\n        self.fill = fill\n        self.task = task\n        self.pk = task.pk\n        self.uom = task.unit_of_measure\n\n        if title is not None:\n            self.title = title\n        else:\n            self.title = task.name\n\n        self.reference = self.title.replace(' ', '_').lower()\n        self.name = task.name\n        self.color = task.color\n        self.background_color = task.background_color\n        self.entries = task.entries\n        self.x_data = [e.date for e in self.entries]\n        self.y_data = [e.value for e in self.entries]\n\n        self.subcharts = [self]\n\n    def __repr__(self):\n        \"\"\"Python representation of the object.\"\"\"\n        return \"<Chart: %s (%d)>\" % (self.title, self.count())\n\n    def __nonzero__(self):\n        \"\"\"Return true if the chart would have at least one point on it.\"\"\"\n        if self.count() > 0:\n            return True\n        else:\n            return False\n\n    # __nonzero__ is the Python 2 truthiness hook; alias it for Python 3\n    __bool__ = __nonzero__\n\n    def resample(self, by='M', agg=avg):\n        \"\"\"Resample the entries of this chart.\"\"\"\n        sampled_dates, sampled_values = resample(self.entries, by=by, agg=agg)\n        self.x_data = sampled_dates\n        self.y_data = sampled_values\n\n    def mavg(self, lookback):\n        \"\"\"Recalculates X & Y value as moving averages.\"\"\"\n        x, y = mavg(self.entries, lookback=lookback)\n        self.x_data = x\n        self.y_data = y\n\n    def _dataset_js(self):\n        \"\"\"JS representation of the main dataset of this chart.\"\"\"\n        data = {\n            'name': self.name,\n            'fill': bool_js(self.fill),\n            'color': self.color,\n            'bg_color': self.background_color,\n            'line_tension': 0,\n            'border_width': .5,\n            'point_border_width': 1,\n            'point_hover_radius': 5,\n            'point_hover_border_width': 1,\n            'point_radius': 1,\n            'point_hit_radius': 10,\n            'y': self._y()\n        }\n        js = \"\"\"{\n            label: \"%(name)s\",\n            fill: %(fill)s,\n            borderColor: \"%(color)s\",\n            backgroundColor: \"%(bg_color)s\",\n            lineTension: %(line_tension)s,\n            borderWidth: %(border_width)s,\n            pointBorderWidth: %(point_border_width)s,\n            pointHoverRadius: %(point_hover_radius)s,\n            pointHoverBorderWidth: %(point_hover_border_width)s,\n            pointRadius: %(point_radius)s,\n            pointHitRadius: %(point_hit_radius)s,\n            data: [%(y)s]\n        }\"\"\" % data\n        return js\n\n    def _datasets(self):\n        \"\"\"Return all datasets for this chart as js formatted string.\"\"\"\n        return ','.join([ch._dataset_js() for ch in self.subcharts])\n\n    def _render_datasets(self):\n        \"\"\"Set context values for the subcharts.\"\"\"\n        datasets = []\n        for ch in self.subcharts:\n            d = {\n                'name': ch.name,\n                'fill': bool_js(ch.fill),\n                'color': ch.color,\n                'bg_color': ch.background_color,\n                'line_tension': 0,\n                'border_width': .5,\n                'point_border_width': 1,\n                'point_hover_radius': 5,\n                'point_hover_border_width': 1,\n                'point_radius': 1,\n                'point_hit_radius': 10,\n                'y': ch._y()\n            }\n            datasets.append(d)\n        return datasets\n\n    def render(self):\n        \"\"\"Render the chart as js.\"\"\"\n        context = {\n            'reference': self.reference,\n            'labels': self._x(),\n            'datasets': self._render_datasets(),\n        }\n        template = loader.get_template('focus/chart_template.js')\n        return template.render(Context(context))\n\n    def count(self):\n        \"\"\"Returns count of entries.\"\"\"\n        assert len(self.x_data) == len(self.y_data)\n        return len(self.x_data)\n\n    def _x(self):\n        \"\"\"\n        Return the x axis formatted as needed.\n\n        fmt: Dec. 19, 2012\n        \"\"\"\n        dates = []\n        for x in self.x_data:\n            try:\n                date = x.date().strftime('%b. %d, %Y')\n            except AttributeError:\n                date = x\n            dates.append(date)\n        return ','.join(['\"%s\"' % x for x in dates])\n\n    def _y(self):\n        \"\"\"\n        Returns a nicely formatted y axis.\n        \"\"\"\n        return ','.join([str(y) for y in self.y_data])\n\ndef bool_js(boolean):\n    \"\"\"Return a python boolean as js boolean text.\"\"\"\n    if boolean:\n        return \"true\"\n    else:\n        return \"false\"\n\n
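# --- Illustrative only; not part of the original module. ---\n# Shows what resample() (defined further down) produces for a few entries.\n# The _Entry namedtuple is a hypothetical stand-in for the real entry model,\n# which only needs `date` and `value` attributes here; the expected output\n# assumes avg() is a plain arithmetic mean.\ndef _demo_resample():\n    \"\"\"Resample two January entries and one February entry by month.\"\"\"\n    from collections import namedtuple\n    _Entry = namedtuple('Entry', ['date', 'value'])\n    entries = [_Entry(datetime(2020, 1, 5), 1),\n               _Entry(datetime(2020, 1, 20), 3),\n               _Entry(datetime(2020, 2, 2), 5)]\n    x, y = resample(entries, by='M')\n    return x, y  # ([datetime(2020, 1, 1), datetime(2020, 2, 1)], [2.0, 5.0])\n\n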
class ResampledChart(Chart):\n    \"\"\"Creates a Chart object that has resampled its data.\"\"\"\n    def __init__(self, task, by='M', agg=avg, title=None, width=400, height=200, fill=True):\n        \"\"\"Instantiates the object.\"\"\"\n        super(ResampledChart, self).__init__(task, title=title, width=width,\n                                             height=height, fill=fill)\n        resampled_dates, resampled_values = resample(self.entries, by=by, agg=agg)\n        self.x_data = resampled_dates\n        self.y_data = resampled_values\n\nclass MovingAverageChart(Chart):\n    \"\"\"Creates a chart with a calculated moving average.\"\"\"\n    def __init__(self, task, lookback, title=None, fill=False):\n        \"\"\"\n        Instantiates the object and calculates a moving average for the data.\n        \"\"\"\n        super(MovingAverageChart, self).__init__(task, title, fill=fill)\n        x, y = mavg(self.entries, lookback=lookback)\n        self.x_data = x\n        self.y_data = y\n\nclass DualMovingAverageChart(MovingAverageChart):\n    \"\"\"\n    Graphs the task with two dual moving averages.\n    \"\"\"\n    def __init__(self, task, short_val, long_val, title=\"Dual Moving Averages\"):\n        \"\"\"\n        Creates several chart instances and joins them as subcharts.\n        \"\"\"\n        super(DualMovingAverageChart, self).__init__(\n            task, lookback=short_val, title=title, fill=False\n        )\n        self.subcharts[0].name = 'Short'\n        long_chart = MovingAverageChart(task, lookback=long_val)\n        long_chart.name = 'Long'\n        long_chart.color = rgba(task.red, task.green, task.blue, .4)\n\n        # equalize lengths\n        max_length = min([len(self.y_data), len(long_chart.y_data)])\n        self.x_data = self.x_data[-max_length:]\n        self.y_data = self.y_data[-max_length:]\n        long_chart.x_data = long_chart.x_data[-max_length:]\n        long_chart.y_data = long_chart.y_data[-max_length:]\n        #assert self.count() == long_chart.count()\n\n        self.subcharts.append(long_chart)\n\nclass MovingAverageEnvelopeChart(MovingAverageChart):\n    \"\"\"\n    Graphs the task with two or more moving averages.\n    \"\"\"\n    def __init__(self, task, bands, title=\"Moving Average Envelope\", fill=False):\n        \"\"\"\n        Takes a task and a list of lookback values and creates a chart of\n        multiple bands of moving averages.\n        \"\"\"\n        super(MovingAverageEnvelopeChart, self).__init__(\n            task, lookback=bands[0], title=title, fill=fill\n        )\n        self.subcharts[0].name = bands[0]\n        for i in bands[1:]:\n            subchart = MovingAverageChart(task, lookback=i, fill=fill)\n            subchart.name = i\n            subchart.color = rgba(task.red, task.green, task.blue, 1 - .1*i)\n            self.subcharts.append(subchart)\n\n        # equalize lengths\n        max_length = min([len(ch.x_data) for ch in self.subcharts])\n\n        self.x_data = self.x_data[-max_length:]\n        self.y_data = self.y_data[-max_length:]\n\n        for ch in self.subcharts:\n            ch.x_data = ch.x_data[-max_length:]\n            ch.y_data = ch.y_data[-max_length:]\n        #assert self.count() == long_chart.count()\n\nclass TimeFrameChart(Chart):\n    \"\"\"Chart that is started/stopped at given times.\"\"\"\n    def __init__(self, task, fromdate, todate, title):\n        super(TimeFrameChart, self).__init__(task, title=title)\n        selected = [e for e in self.entries\n                    if e.date >= 
fromdate and e.date <= todate]\n self.entries = selected\n self.x_data = [e.date for e in selected]\n self.y_data = [e.value for e in selected]\n\n\ndef mavg(entries, lookback):\n \"\"\"\n Returns two lists, one for each of x_axis and y_axis where the\n y axis is a calculated moving average of the given count.\n \"\"\"\n logged, x, y = [], [], []\n for e in entries:\n logged.append(e)\n if len(logged) < lookback:\n continue\n x.append(e.date)\n sample = [float(e.value) for e in logged[-lookback:]]\n assert len(sample) == lookback\n y.append(avg(sample))\n return x, y\n\ndef resample(entries, by='M', agg=avg):\n \"\"\"\n Resample date entries by a group of size [default: M]\n \"\"\"\n groups = OrderedDict()\n for e in entries:\n if by == 'M':\n sample = reduce_by_month(e.date)\n elif by == 'W':\n sample = reduce_by_week(e.date)\n elif by == 'Y':\n sample = e.date.year\n elif by == 'd':\n sample = reduce_by_day(e.date)\n else:\n raise NotImplementedError\n\n val = float(e.value)\n\n try:\n groups[sample].append(val)\n except KeyError:\n groups[sample] = [val]\n\n groups = ffill(groups, by=by)\n\n x, y = [], []\n for group, values in groups.items():\n x.append(group)\n y.append(agg(values))\n\n return x, y\n\ndef reduce_by_month(date):\n \"\"\"Returns a datetime object that is the first of its month.\"\"\"\n return datetime(date.year, date.month, 1, tzinfo=date.tzinfo)\n\ndef reduce_by_week(date):\n \"\"\"Returns a datetime object representing the first day of that week.\"\"\"\n date = reduce_by_day(date)\n return date - timedelta(days=date.isocalendar()[2] - 1)\n\ndef reduce_by_day(dateish):\n \"\"\"Turn a datetime object into a date, leave dates alone.\"\"\"\n try:\n return dateish.date()\n except AttributeError:\n return dateish\n\ndef advance_by_month(date):\n \"\"\"Returns the date one month later.\"\"\"\n if date.month == 12:\n return date.replace(year=date.year + 1, month=1)\n else:\n return date.replace(month=date.month + 1)\n\ndef ffill(groups, by='M', fill_value=0., fill_method=None):\n \"\"\"\n Takes a group dict and fills missing time keys with the value in\n fill_value.\n \"\"\"\n if not groups: return groups\n\n if by == 'd':\n dates = [reduce_by_day(d) for d in groups.keys()]\n dates.sort()\n step = lambda x: x + timedelta(days=1)\n elif by == 'M':\n dates = [reduce_by_month(d) for d in groups.keys()]\n dates.sort()\n step = advance_by_month\n elif by == 'W':\n dates = [reduce_by_week(d) for d in groups.keys()]\n dates.sort()\n step = lambda x: x + timedelta(days=7)\n elif by == 'Y':\n dates = [d for d in groups.keys()]\n dates.sort()\n step = lambda x: x + 1\n else:\n raise NotImplementedError\n\n d = dates[0]\n previous_value = fill_value\n while d <= dates[-1]:\n if d not in groups.keys():\n if fill_method == 'previous':\n groups[d] = previous_value\n else:\n groups[d] = [fill_value]\n else:\n previous_value = groups[d]\n d = step(d)\n\n sorted_groups = OrderedDict()\n for key in sorted(groups.keys()):\n sorted_groups[key] = groups[key]\n return sorted_groups\n\ndef filter_dates(entries, fromdate, todate):\n \"\"\"\n Filters entries by input dates\n \"\"\"\n return [e for e in entries if e.date >= fromdate and e.date <= todate]\n\ndef x_data(dates):\n \"\"\"\n Return the x axis formatted as needed.\n\n fmt: Dec. 19, 2012\n \"\"\"\n date_strings = []\n for x in dates:\n try:\n date = x.date().strftime('%b. 
%d, %Y')\n except AttributeError:\n date = x\n date_strings.append(date)\n return ','.join(['\"%s\"' % x for x in date_strings])\n\ndef y_data(values):\n \"\"\"\n Returns a nicely formatted y axis.\n \"\"\"\n return ','.join([str(y) for y in values])\n\ndef equalize_lengths(axes):\n \"\"\"Equalize the lengths of all the input axes.\"\"\"\n max_length = min([len(axis[0]) for axis in axes])\n print('Maximum length should be set to %d' % max_length)\n new_axes = []\n for x, y in axes:\n x = x[-max_length:]\n y = y[-max_length:]\n print(len(x), len(y))\n new_axes.append([x, y])\n return new_axes\n", "sub_path": "focus/charts.py", "file_name": "charts.py", "file_ext": "py", "file_size_in_byte": 13008, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "dancohenio.utils.avg", "line_number": 55, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 131, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 131, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 132, "usage_type": "call"}, {"api_name": "dancohenio.utils.avg", "line_number": 169, "usage_type": "name"}, {"api_name": "colors.rgba", "line_number": 202, "usage_type": "call"}, {"api_name": "colors.rgba", "line_number": 230, "usage_type": "call"}, {"api_name": "dancohenio.utils.avg", "line_number": 268, "usage_type": "call"}, {"api_name": "dancohenio.utils.avg", "line_number": 271, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 275, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 306, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 311, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 337, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 345, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 365, "usage_type": "call"}]} {"seq_id": "262913255", "text": "from __future__ import division\n\nfrom math import ceil\nfrom time import time\nfrom itertools import groupby\nfrom collections import deque\n\nimport six\nfrom numpy import arange, mean\nimport matplotlib.pyplot as plt\n\nfrom neupy.utils import format_data, is_row1d\nfrom neupy.helpers import preformat_value\nfrom neupy.core.base import BaseSkeleton\nfrom neupy.core.properties import (Property, FuncProperty, NumberProperty,\n BoolProperty)\nfrom neupy.layers import BaseLayer, OutputLayer\nfrom neupy.functions import normilize_error_output, mse\nfrom .utils import iter_until_converge, shuffle\nfrom .connections import (FAKE_CONNECTION, LayerConnection,\n NetworkConnectionError)\n\n\n__all__ = ('BaseNetwork',)\n\n\ndef show_training_summary(network):\n network.logs.data(\"\"\"\n Epoch {epoch}\n Train error: {error}\n Validation error: {error_out}\n Epoch time: {epoch_time} sec\n \"\"\".format(\n epoch=network.epoch,\n error=network.last_error_in() or '-',\n error_out=network.last_error_out() or '-',\n epoch_time=round(network.train_epoch_time, 5)\n ))\n\n\ndef show_epoch_summary(network, show_epoch):\n delay_limit = 1 # in seconds\n prev_summary_time = None\n delay_history_length = 10\n terminal_output_delays = deque(maxlen=delay_history_length)\n\n while True:\n now = time()\n\n if prev_summary_time is not None:\n time_delta = now - prev_summary_time\n terminal_output_delays.append(time_delta)\n\n show_training_summary(network)\n prev_summary_time = now\n\n if 
len(terminal_output_delays) == delay_history_length:\n            prev_summary_time = None\n            average_delay = mean(terminal_output_delays)\n\n            if average_delay < delay_limit:\n                show_epoch *= ceil(delay_limit / average_delay)\n                network.logs.warning(\n                    \"Too many outputs in the terminal. Status will \"\n                    \"now be logged once every {} epochs\"\n                    \"\".format(show_epoch)\n                )\n                terminal_output_delays.clear()\n\n        yield show_epoch\n\n\ndef shuffle_train_data(input_train, target_train):\n    if target_train is None:\n        return shuffle(input_train), None\n    return shuffle(input_train, target_train)\n\n\ndef clean_layers(connection):\n    \"\"\" Clean up layer connections and transform them into one format.\n    This function also validates the layer connections.\n\n    Parameters\n    ----------\n    connection : list, tuple or object\n        Layers connection in different formats.\n\n    Returns\n    -------\n    object\n        Cleaned layers connection.\n    \"\"\"\n    if connection == FAKE_CONNECTION:\n        return connection\n\n    if isinstance(connection, tuple):\n        connection = list(connection)\n\n    islist = isinstance(connection, list)\n\n    if islist and isinstance(connection[0], BaseLayer):\n        chain_connection = connection.pop()\n        for layer in reversed(connection):\n            chain_connection = LayerConnection(layer, chain_connection)\n        connection = chain_connection\n\n    elif islist and isinstance(connection[0], LayerConnection):\n        pass\n\n    if not isinstance(connection.output_layer, OutputLayer):\n        raise NetworkConnectionError(\"Final layer must be OutputLayer class \"\n                                     \"instance.\")\n\n    return connection\n\n\ndef parse_show_epoch_property(value, n_epochs):\n    if isinstance(value, int):\n        return value\n\n    number_end_position = value.index('time')\n    # Ignore grammar mistakes like `2 time`, this error could be\n    # really annoying\n    n_epochs_to_check = int(value[:number_end_position].strip())\n\n    if n_epochs <= n_epochs_to_check:\n        return 1\n\n    return int(round(n_epochs / n_epochs_to_check))\n\n\n
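# --- Illustrative only; not part of neupy. ---\n# How the `show_epoch` syntax above resolves to a logging interval:\ndef _demo_parse_show_epoch():\n    assert parse_show_epoch_property('10 times', 1000) == 100  # log every 100th epoch\n    assert parse_show_epoch_property('10 times', 5) == 1       # fewer epochs than requested logs\n    assert parse_show_epoch_property(25, 1000) == 25           # plain integers pass through\n\n\n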
class ShowEpochProperty(Property):\n    \"\"\" Class helps validate specific syntax for `show_epoch`\n    property from ``BaseNetwork`` class.\n    \"\"\"\n    expected_type = tuple([int] + [six.string_types])\n\n    def validate(self, value):\n        if not isinstance(value, six.string_types):\n            if value < 1:\n                raise ValueError(\"Property `{}` value should be an integer \"\n                                 \"greater than zero or a string. See the \"\n                                 \"documentation for more information.\"\n                                 \"\".format(self.name))\n            return\n\n        if 'time' not in value:\n            raise ValueError(\"`{}` value has an invalid string format.\"\n                             \"\".format(self.name))\n\n        valid_endings = ('times', 'time')\n        number_end_position = value.index('time')\n        number_part = value[:number_end_position].strip()\n\n        if not value.endswith(valid_endings) or not number_part.isdigit():\n            valid_endings_formated = ', '.join(valid_endings)\n            raise ValueError(\n                \"Property `{}` in string format should be a positive number \"\n                \"with one of these endings: {}. For example: `10 times`.\"\n                \"\".format(self.name, valid_endings_formated)\n            )\n\n        if int(number_part) < 1:\n            raise ValueError(\"The number part of the `{}` property should \"\n                             \"be an integer greater than or equal to \"\n                             \"one.\".format(self.name))\n\n\nclass BaseNetwork(BaseSkeleton):\n    \"\"\" Base class for Network algorithms.\n\n    Parameters\n    ----------\n    {full_params}\n\n    Methods\n    -------\n    {plot_errors}\n    {last_error}\n    \"\"\"\n    error = FuncProperty(default=mse)\n    use_bias = BoolProperty(default=True)\n    step = NumberProperty(default=0.1)\n\n    # Training settings\n    show_epoch = ShowEpochProperty(min_size=1, default='10 times')\n    shuffle_data = BoolProperty(default=False)\n\n    # Signals\n    train_epoch_end_signal = FuncProperty()\n    train_end_signal = FuncProperty()\n\n    def __init__(self, connection, **options):\n        self.connection = clean_layers(connection)\n\n        self.errors_in = []\n        self.errors_out = []\n        self.train_epoch_time = None\n\n        self.layers = list(self.connection)\n        self.input_layer = self.layers[0]\n        self.output_layer = self.layers[-1]\n        self.train_layers = self.layers[:-1]\n\n        super(BaseNetwork, self).__init__(**options)\n        self.setup_defaults()\n\n        if self.verbose:\n            self.show_network_options(highlight_options=options)\n\n        self.init_layers()\n\n    def show_network_options(self, highlight_options=None):\n        available_classes = [cls.__name__ for cls in self.__class__.__mro__]\n        logs = self.logs\n\n        if highlight_options is None:\n            highlight_options = {}\n\n        def classname_grouper(option):\n            classname = option[1].class_name\n            class_priority = -available_classes.index(classname)\n            return (class_priority, classname)\n\n        # Sort and group options by classes\n        grouped_options = groupby(\n            sorted(self.options.items(), key=classname_grouper),\n            key=classname_grouper\n        )\n\n        if isinstance(self.connection, LayerConnection):\n            logs.header(\"Network structure\")\n            logs.log(\"LAYERS\", self.connection)\n\n        # Display all network options in the terminal.\n        logs.header(\"Network options\")\n        for (_, clsname), class_options in grouped_options:\n            if not class_options:\n                # Skip classes from which all available attributes\n                # have been removed.\n                continue\n\n            logs.simple(\"{}:\".format(clsname))\n\n            for key, data in sorted(class_options):\n                if key in highlight_options:\n                    logger = logs.log\n                    value = highlight_options[key]\n                else:\n                    logger = logs.gray_log\n                    value = data.value\n\n                logger(\"OPTION\", \"{} = {}\".format(\n                    key, preformat_value(value))\n                )\n            logs.empty()\n\n    def setup_defaults(self):\n        \"\"\" Set up default values before populating the options.\n        \"\"\"\n\n    # ----------------- Neural Network Layers ---------------- #\n\n    def init_layers(self):\n        \"\"\" Initialize layers.\n        \"\"\"\n        if self.connection == FAKE_CONNECTION:\n            return\n\n        for layer in self.train_layers:\n            layer.initialize(with_bias=self.use_bias)\n\n    # ----------------- Neural Network Train ---------------- #\n\n    def _train(self, input_train, target_train=None, input_test=None,\n               target_test=None, epochs=100, epsilon=None):\n        \"\"\" Main method for the Neural Network training.\n        \"\"\"\n\n        # ----------- Pre-format target data ----------- #\n\n        input_row1d = is_row1d(self.input_layer)\n        input_train = format_data(input_train, row1d=input_row1d)\n\n        target_row1d = is_row1d(self.output_layer)\n        target_train = format_data(target_train, row1d=target_row1d)\n\n        if input_test is not None:\n            input_test = format_data(input_test, row1d=input_row1d)\n\n        if target_test is not None:\n            target_test = format_data(target_test, 
row1d=target_row1d)\n\n        # ----------- Validate input values ----------- #\n\n        if epsilon is not None and epochs <= 2:\n            raise ValueError(\"Network should train at least 3 epochs before \"\n                             \"checking the difference between errors\")\n\n        # ----------- Predefine parameters ----------- #\n\n        self.epoch = 1\n        show_epoch = self.show_epoch\n        logs = self.logs\n        compute_error_out = (input_test is not None and\n                             target_test is not None)\n        predict = self.predict\n        last_epoch_shown = 0\n\n        if epsilon is not None:\n            iterepochs = iter_until_converge(self, epsilon, max_epochs=epochs)\n\n            if isinstance(show_epoch, six.string_types):\n                show_epoch = 100\n                logs.warning(\"Can't use `show_epoch` value in converging \"\n                             \"mode. Defaulting `show_epoch` to 100.\")\n\n        else:\n            iterepochs = range(1, epochs + 1)\n            show_epoch = parse_show_epoch_property(show_epoch, epochs)\n\n        epoch_summary = show_epoch_summary(self, show_epoch)\n\n        # ----------- Train process ----------- #\n\n        logs.header(\"Start train\")\n        logs.log(\"TRAIN\", \"Train data size: {}\".format(input_train.shape[0]))\n\n        if input_test is not None:\n            logs.log(\"TRAIN\", \"Validation data size: {}\"\n                     \"\".format(input_test.shape[0]))\n\n        if epsilon is None:\n            logs.log(\"TRAIN\", \"Total epochs: {}\".format(epochs))\n        else:\n            logs.log(\"TRAIN\", \"Max epochs: {}\".format(epochs))\n\n        logs.empty()\n\n        # Optimizations for long loops\n        errors = self.errors_in\n        errors_out = self.errors_out\n        shuffle_data = self.shuffle_data\n\n        error_func = self.error\n        train_epoch = self.train_epoch\n        train_epoch_end_signal = self.train_epoch_end_signal\n        train_end_signal = self.train_end_signal\n\n        self.input_train = input_train\n        self.target_train = target_train\n\n        for epoch in iterepochs:\n            self.epoch = epoch\n            epoch_start_time = time()\n\n            if shuffle_data:\n                input_train, target_train = shuffle_train_data(input_train,\n                                                               target_train)\n                self.input_train = input_train\n                self.target_train = target_train\n\n            try:\n                error = train_epoch(input_train, target_train)\n\n                if compute_error_out:\n                    predicted_test = predict(input_test)\n                    error_out = error_func(predicted_test, target_test)\n                    errors_out.append(error_out)\n\n                errors.append(error)\n                self.train_epoch_time = time() - epoch_start_time\n\n                if epoch % show_epoch == 0 or epoch == 1:\n                    show_epoch = next(epoch_summary)\n                    last_epoch_shown = epoch\n\n                if train_epoch_end_signal is not None:\n                    train_epoch_end_signal(self)\n\n            except StopIteration as err:\n                logs.log(\"TRAIN\", \"Epoch #{} stopped. {}\"\n                         \"\".format(epoch, str(err)))\n                break\n\n        if epoch != last_epoch_shown:\n            show_training_summary(self)\n\n        if train_end_signal is not None:\n            train_end_signal(self)\n\n        logs.log(\"TRAIN\", \"End train\")\n\n    # ----------------- Errors ----------------- #\n\n    def _last_error(self, errors):\n        if errors and errors[-1] is not None:\n            return normilize_error_output(errors[-1])\n\n    def last_error_in(self):\n        return self._last_error(self.errors_in)\n\n    def last_error(self):\n        return self._last_error(self.errors_in)\n\n    def last_error_out(self):\n        return self._last_error(self.errors_out)\n\n    def previous_error(self):\n        errors = self.errors_in\n        return normilize_error_output(errors[-2]) if len(errors) > 2 else None\n\n    def _normalized_errors(self, errors):\n        if not len(errors) or isinstance(errors[0], float):\n            return errors\n\n        self.logs.warning(\"Your errors are badly formatted for plot output. 
\"\n \"They will be normilized.\")\n\n normilized_errors = []\n for error in errors:\n normilized_errors.append(normilize_error_output(error))\n\n return normilized_errors\n\n def normalized_errors_in(self):\n return self._normalized_errors(self.errors_in)\n\n def normalized_errors_out(self):\n return self._normalized_errors(self.errors_out)\n\n def plot_errors(self, logx=False):\n if not self.errors_in:\n return\n\n errors_in = self.normalized_errors_in()\n errors_out = self.normalized_errors_out()\n errors_range = arange(len(errors_in))\n plot_function = plt.semilogx if logx else plt.plot\n\n line_error_in, = plot_function(errors_range, errors_in)\n title_text = 'Learning error after each epoch'\n\n if errors_out:\n line_error_out, = plot_function(errors_range, errors_out)\n plt.legend(\n [line_error_in, line_error_out],\n ['Train error', 'Validation error']\n )\n title_text = 'Learning errors after each epoch'\n\n plt.title(title_text)\n plt.xlim(0)\n\n plt.ylabel('Error')\n plt.xlabel('Epoch')\n\n plt.show()\n\n # ----------------- Representations ----------------- #\n\n def get_class_name(self):\n return self.__class__.__name__\n\n def __repr__(self):\n classname = self.get_class_name()\n options_repr = self._repr_options()\n\n if self.connection != FAKE_CONNECTION:\n return \"{}({}, {})\".format(classname, self.connection,\n options_repr)\n return \"{}({})\".format(classname, options_repr)\n", "sub_path": "neupy/network/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 15226, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "collections.deque", "line_number": 45, "usage_type": "call"}, {"api_name": "time.time", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 59, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 62, "usage_type": "call"}, {"api_name": "utils.shuffle", "line_number": 75, "usage_type": "call"}, {"api_name": "utils.shuffle", "line_number": 76, "usage_type": "call"}, {"api_name": "connections.FAKE_CONNECTION", "line_number": 93, "usage_type": "name"}, {"api_name": "neupy.layers.BaseLayer", "line_number": 101, "usage_type": "argument"}, {"api_name": "connections.LayerConnection", "line_number": 104, "usage_type": "call"}, {"api_name": "connections.LayerConnection", "line_number": 107, "usage_type": "argument"}, {"api_name": "neupy.layers.OutputLayer", "line_number": 110, "usage_type": "argument"}, {"api_name": "connections.NetworkConnectionError", "line_number": 111, "usage_type": "call"}, {"api_name": "neupy.core.properties.Property", "line_number": 132, "usage_type": "name"}, {"api_name": "six.string_types", "line_number": 136, "usage_type": "attribute"}, {"api_name": "six.string_types", "line_number": 139, "usage_type": "attribute"}, {"api_name": "neupy.core.base.BaseSkeleton", "line_number": 169, "usage_type": "name"}, {"api_name": "neupy.core.properties.FuncProperty", "line_number": 181, "usage_type": "call"}, {"api_name": "neupy.functions.mse", "line_number": 181, "usage_type": "name"}, {"api_name": "neupy.core.properties.BoolProperty", "line_number": 182, "usage_type": "call"}, {"api_name": "neupy.core.properties.NumberProperty", "line_number": 183, "usage_type": "call"}, {"api_name": "neupy.core.properties.BoolProperty", "line_number": 187, "usage_type": "call"}, {"api_name": "neupy.core.properties.FuncProperty", "line_number": 190, "usage_type": "call"}, {"api_name": "neupy.core.properties.FuncProperty", "line_number": 
191, "usage_type": "call"}, {"api_name": "itertools.groupby", "line_number": 226, "usage_type": "call"}, {"api_name": "connections.LayerConnection", "line_number": 231, "usage_type": "argument"}, {"api_name": "neupy.helpers.preformat_value", "line_number": 254, "usage_type": "call"}, {"api_name": "connections.FAKE_CONNECTION", "line_number": 267, "usage_type": "name"}, {"api_name": "neupy.utils.is_row1d", "line_number": 282, "usage_type": "call"}, {"api_name": "neupy.utils.format_data", "line_number": 283, "usage_type": "call"}, {"api_name": "neupy.utils.is_row1d", "line_number": 285, "usage_type": "call"}, {"api_name": "neupy.utils.format_data", "line_number": 286, "usage_type": "call"}, {"api_name": "neupy.utils.format_data", "line_number": 289, "usage_type": "call"}, {"api_name": "neupy.utils.format_data", "line_number": 292, "usage_type": "call"}, {"api_name": "utils.iter_until_converge", "line_number": 311, "usage_type": "call"}, {"api_name": "six.string_types", "line_number": 313, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 356, "usage_type": "call"}, {"api_name": "time.time", "line_number": 373, "usage_type": "call"}, {"api_name": "neupy.functions.normilize_error_output", "line_number": 399, "usage_type": "call"}, {"api_name": "neupy.functions.normilize_error_output", "line_number": 412, "usage_type": "call"}, {"api_name": "neupy.functions.normilize_error_output", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 439, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.semilogx", "line_number": 440, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 440, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 440, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 447, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 447, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 453, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 453, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 454, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 454, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 456, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 456, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 457, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 457, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 459, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 459, "usage_type": "name"}, {"api_name": "connections.FAKE_CONNECTION", "line_number": 470, "usage_type": "name"}]} {"seq_id": "412729468", "text": "#######################################################\n#\n# TAKFreeServer.py\n# Original author: naman108\n# This code is Open Source, made available under the EPL 2.0 license.\n# https://www.eclipse.org/legal/eplfaq.php\n# credit to Harshini73 for base code\n#\n#######################################################\nimport argparse\nimport datetime\nimport logging\nimport os\nimport socket\nimport sqlite3\nimport sys\nimport threading\nimport time\nimport traceback\nimport uuid\nimport xml.etree.ElementTree as ET\nfrom logging.handlers import RotatingFileHandler\n\nimport constants\nimport SQLcommands\nfrom Controllers.RequestCOTController import 
RequestCOTController\nfrom Controllers.serializer import Serializer\n\nconst = constants.vars()\nsql = SQLcommands.sql()\n\n\ndef newHandler(filename, log_level, log_format):\n handler = RotatingFileHandler(\n filename,\n maxBytes=const.MAXFILESIZE,\n backupCount=const.BACKUPCOUNT\n )\n handler.setFormatter(log_format)\n handler.setLevel(log_level)\n return handler\n\n\nlog_format = logging.Formatter(const.LOGFORMAT)\nlogger = logging.getLogger(const.LOGNAME)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(newHandler(const.DEBUGLOG, logging.DEBUG, log_format))\nlogger.addHandler(newHandler(const.WARNINGLOG, logging.WARNING, log_format))\nlogger.addHandler(newHandler(const.INFOLOG, logging.INFO, log_format))\nconsole = logging.StreamHandler(sys.stdout)\nconsole.setFormatter(log_format)\nconsole.setLevel(logging.DEBUG)\nlogger.addHandler(console)\n\n\n''' Server class '''\nclass ThreadedServer(object):\n def __init__(self, host=const.IP, port=const.PORT):\n # change from string\n self.host = host\n self.port = port\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind((self.host, self.port))\n self.client_dict = {}\n logger.info(f\"Server IP: {host}, server port: {port}\")\n self.emergencyDict = {}\n # configure sql database\n with sqlite3.connect(const.DATABASE) as db:\n cursor = db.cursor()\n cursor.execute(sql.CREATEUSERSTABLE)\n cursor.close()\n db.commit()\n self.bandaidUID = ''\n\n def listen(self):\n '''\n listen for client connections and begin thread if found\n '''\n threading.Thread(target=self.bandaid, args=(), daemon=True).start()\n self.sock.listen(1000)\n while True:\n try:\n client, address = self.sock.accept()\n threading.Thread(target=self.listenToClient, args=(client, address), daemon=True).start()\n except:\n logger.error(traceback.format_exc())\n logger.error('Error in listen()')\n\n def bandaid(self):\n while True:\n try:\n start = datetime.datetime.now()\n end = start + datetime.timedelta(minutes=const.RENEWTIME)\n while datetime.datetime.now() < end:\n time.sleep(10)\n self.bandaidUID = uuid.uuid1()\n mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n mysock.connect(('127.0.0.1', const.PORT))\n mysock.send(Serializer().serializerRoot(RequestCOTController().ping(eventuid=self.bandaidUID)).encode())\n mysock.recv(2048)\n mysock.shutdown(socket.SHUT_RDWR)\n mysock.close()\n logger.info('finished bandaid keepalive')\n logger.debug(f\"Currently running {len(threading.enumerate())} threads\")\n except ConnectionRefusedError:\n logger.warning(\"Bandaid listening socket was closed\")\n except:\n logger.error(traceback.format_exc())\n logger.error(\"Error in bandaid()\")\n\n def check_xml(self, xml_string, current_id):\n '''\n check xml type or class\n '''\n data_value = ''\n try:\n if xml_string == const.EMPTY_BYTE:\n logger.info('client disconnected via empty byte response')\n self.client_dict[current_id]['alive'] = 0\n logger.info(str(self.client_dict[current_id]['uid'])+' disconnected')\n return const.FAIL\n elif xml_string == None:\n logger.info('client disconnected via none response')\n self.client_dict[current_id]['alive'] = 0\n logger.info(str(self.client_dict[current_id]['uid'])+' disconnected')\n return const.FAIL\n\n tree = ET.fromstring(xml_string)\n uid = tree.get('uid')\n logger.debug('parsing data uid is ' + str(uid))\n cot_type = tree.get('type')\n if cot_type == \"a-f-G-U-C\":\n self.client_dict[current_id]['id_data'] = xml_string\n elif cot_type == 
'b-f-t-a':\n destination = tree.find('detail').find('marti').find('dest').attrib['callsign']\n connData = self.client_dict[current_id][\"id_data\"]\n for x in self.client_dict:\n if self.client_dict[x][\"callsign\"] == destination:\n self.client_dict[x][\"main_data\"].append(connData)\n logger.info('adding conn data to '+str(x))\n logger.info(f\"Now adding the following connection data: {str(connData)}\")\n if uid.endswith(\"9-1-1\"):\n for x in tree.iter('emergency'):\n if x.get('cancel') != 'true':\n self.emergencyDict[uid] = xml_string\n else:\n del self.emergencyDict[uid]\n elif uid.endswith(const.PING):\n data_value = const.PING\n logger.debug(f\"Received a ping: {xml_string}\")\n elif uid.startswith(const.GEOCHAT):\n data_value = const.GEOCHAT\n logger.debug(f\"Received a GeoChat: {xml_string}\")\n else:\n logger.debug(f\"Received CoT: {xml_string}\")\n\n # adds data to all connected client data list except sending client\n for detail in tree.findall('detail'):\n marti = detail.find('marti')\n if marti != None:\n dest = marti.find('dest')\n callsign = dest.attrib['callsign']\n for client_id in self.client_dict:\n if self.client_dict[client_id]['callsign'] == callsign:\n self.client_dict[client_id]['main_data'].append(xml_string)\n else:\n for client_id in self.client_dict:\n if client_id != current_id:\n self.client_dict[client_id]['main_data'].append(xml_string)\n return data_value\n\n except:\n logger.error(traceback.format_exc())\n logger.error(f\"Error in check_xml for: {xml_string}\")\n\n def connectionSetup(self, client, address):\n db = sqlite3.connect(const.DATABASE)\n try:\n cursor = db.cursor()\n first_run = 1\n # Create client dictionary within main dictionary containing arrays for data and chat also other stuff for client initial connection\n total_clients_connected = 0\n total_clients_connected += 1\n id_data = client.recv(const.STARTBUFFER)\n logger.debug(f\"id_data = {id_data}\")\n tree = ET.fromstring(id_data)\n uid = tree.get('uid')\n if uid == self.bandaidUID:\n return 'Bandaid'\n callsign = tree.find('detail').find('contact').attrib['callsign']\n current_id = uuid.uuid1().int\n\n # add identifying information\n self.client_dict[current_id] = {\n 'id_data': id_data,\n 'main_data': [],\n 'alive': 1,\n 'uid': uid,\n 'client': client,\n 'callsign': callsign\n }\n cursor.execute(sql.INSERTNEWUSER, (str(current_id), str(uid), str(callsign)))\n cursor.close()\n db.commit()\n logger.info(f\"Client connected, initial information for current_id={current_id}: {self.client_dict[current_id]}\")\n return str(first_run)+' ? '+str(total_clients_connected)+' ? '+str(id_data)+' ? '+str(current_id)\n except:\n logger.error(traceback.format_exc())\n logger.error('Error in connection setup')\n return \"error\"\n finally:\n db.close()\n\n def recieveAll(self, client):\n try:\n total_data = []\n while True:\n data = client.recv(const.BUFFER)\n logger.debug(f\"Received {sys.getsizeof(data)} bytes from {client}\")\n if sys.getsizeof(data) == const.BUFFER+33:\n total_data.append(data)\n elif sys.getsizeof(data) < const.BUFFER+33:\n total_data.append(data)\n break\n total_data = b''.join(total_data)\n return total_data\n except:\n logger.error(traceback.format_exc())\n logger.error(f\"Error in recieveAll() from {client}\")\n return None\n\n def listenToClient(self, client, address):\n ''' \n Function to receive data from the client. 
It runs for the lifetime of the connection.\n        '''\n        try:\n            defaults = self.connectionSetup(client, address)\n            if defaults == 'error':\n                client.shutdown(socket.SHUT_RDWR)\n                client.close()\n                return\n            elif defaults == 'Bandaid':\n                self.sock.shutdown(socket.SHUT_RDWR)\n                client.close()\n                return\n            else:\n                defaults = defaults.split(' ? ')\n                logger.debug(defaults)\n                first_run = int(defaults[0])\n                id_data = bytes(defaults[2], 'utf-8')\n                current_id = int(defaults[3])\n            # main connection loop\n            while True:\n                # Receive data\n                try:\n                    if first_run == 0:\n                        data = self.recieveAll(client)\n                        logger.debug(f\"Received data from client: {str(data)}\")\n                        working = self.check_xml(data, current_id)\n                        # checking if check_xml detected client disconnect\n                        if working == const.FAIL:\n                            timeoutInfo = Serializer().serializerRoot(RequestCOTController().timeout(\n                                eventhow='h-g-i-g-o',\n                                eventuid=uuid.uuid1(),\n                                linkuid=self.client_dict[current_id]['uid']\n                            ))\n                            logger.debug(f\"Sending timeout: {timeoutInfo.encode()}\")\n                            for client_id in self.client_dict:\n                                if client_id != current_id:\n                                    self.client_dict[client_id]['client'].send(timeoutInfo.encode())\n                            uid = self.client_dict[current_id]['uid']\n                            del self.client_dict[current_id]\n                            with sqlite3.connect(const.DATABASE) as db:\n                                cursor = db.cursor()\n                                cursor.execute(sql.DELETEBYUID, (uid,))\n                                cursor.close()\n                                db.commit()\n                            client.shutdown(socket.SHUT_RDWR)\n                            client.close()\n                            return\n                        elif working == const.PING:\n                            logger.debug('Received ping')\n\n                    elif first_run == 1:\n                        for client_id in self.client_dict:\n                            client = self.client_dict[client_id]['client']\n                            if client != self.client_dict[current_id]['client']:\n                                logger.info('Sending '+str(id_data))\n                                client.send(self.client_dict[current_id]['id_data'])\n                        for client_id in self.client_dict:\n                            data = self.client_dict[client_id]['id_data']\n                            logger.debug('Sending conn data to '+str(client))\n                            client.send(data)\n                        threading.Thread(\n                            target=self.sendClientData,\n                            args=(client, address, current_id),\n                            daemon=True).start()\n\n                    # initial hello exchange complete; switch to the normal receive path\n                    first_run = 0\n                except:\n                    logger.error(traceback.format_exc())\n                    logger.error('Error in listenToClient() main loop')\n                    client.close()\n                    return\n        except Exception:\n            logger.error(traceback.format_exc())\n            logger.error(\"Unknown error in listenToClient\")\n            client.close()\n\n    def sendClientData(self, client, address, current_id):\n        try:\n            while True:\n                time.sleep(const.DELAY)\n                for uid in self.emergencyDict:\n                    client.send(self.emergencyDict[uid])\n                    logger.info(f\"Emergency activated: {uid}\")\n\n                if len(self.client_dict[current_id]['main_data']) > 0:\n                    for x in self.client_dict[current_id]['main_data']:\n                        logger.debug(self.client_dict[current_id]['main_data'])\n                        client.send(x)\n                        logger.info('Sent ' + str(x) + ' to ' + str(address))\n                        self.client_dict[current_id]['main_data'].remove(x)\n                else:\n                    client.send(Serializer().serializerRoot(RequestCOTController().ping(eventuid=uuid.uuid1())).encode())\n        except:\n            logger.error(traceback.format_exc())\n            logger.warning('Error in sendClientData')\n        finally:\n            client.close()\n\n\ndef startup():\n    logger.info('starting windows service')\n    ThreadedServer(host=const.IP, port=const.PORT).listen()\n\n\nif __name__ == \"__main__\":\n    try:\n        parser = argparse.ArgumentParser()\n        parser.add_argument(\"-p\", type=int)\n        args = parser.parse_args()\n        port = args.p if args.p is not None else const.PORT\n    except:\n        logger.error(f\"Failed to read port number from command arguments, defaulting to {const.PORT}\")\n        port = const.PORT\n    
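# Example invocation (hypothetical): python server.py -p 8087\n    # Without -p, or if argument parsing fails, const.PORT is used.\n    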
ThreadedServer(host=const.IP, port=port).listen()\n", "sub_path": "TAKfreeServer/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 15001, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "constants.vars", "line_number": 29, "usage_type": "call"}, {"api_name": "SQLcommands.sql", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 44, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 45, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 46, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 47, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 48, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 49, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 50, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 52, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 62, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 62, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 62, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 63, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 63, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 69, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 80, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 85, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 87, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 93, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 95, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 95, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 96, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 97, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 98, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 98, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 98, "usage_type": "attribute"}, {"api_name": "Controllers.serializer.Serializer", "line_number": 100, "usage_type": "call"}, {"api_name": "Controllers.RequestCOTController.RequestCOTController", "line_number": 100, "usage_type": "call"}, {"api_name": "socket.SHUT_RDWR", "line_number": 102, "usage_type": "attribute"}, {"api_name": "threading.enumerate", "line_number": 105, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 109, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 129, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 129, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 174, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 178, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 187, "usage_type": "call"}, 
{"api_name": "xml.etree.ElementTree", "line_number": 187, "usage_type": "name"}, {"api_name": "uuid.uuid1", "line_number": 192, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 209, "usage_type": "call"}, {"api_name": "sys.getsizeof", "line_number": 220, "usage_type": "call"}, {"api_name": "sys.getsizeof", "line_number": 221, "usage_type": "call"}, {"api_name": "sys.getsizeof", "line_number": 223, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 229, "usage_type": "call"}, {"api_name": "socket.SHUT_RDWR", "line_number": 240, "usage_type": "attribute"}, {"api_name": "socket.SHUT_RDWR", "line_number": 244, "usage_type": "attribute"}, {"api_name": "Controllers.serializer.Serializer", "line_number": 263, "usage_type": "call"}, {"api_name": "Controllers.RequestCOTController.RequestCOTController", "line_number": 263, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 265, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 274, "usage_type": "call"}, {"api_name": "socket.SHUT_RDWR", "line_number": 279, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 295, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 303, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 308, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 309, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 315, "usage_type": "call"}, {"api_name": "Controllers.serializer.Serializer", "line_number": 327, "usage_type": "call"}, {"api_name": "Controllers.RequestCOTController.RequestCOTController", "line_number": 327, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 327, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 329, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 342, "usage_type": "call"}]} {"seq_id": "376546621", "text": "from collections import Counter\nfrom typing import List\n\n\nclass BingoCard:\n \"\"\"Represents a bingo card.\"\"\"\n\n WIDTH = 5\n MARKED = -1\n MARKED_WIN = MARKED * WIDTH\n\n def __init__(self, lines: List[str]) -> None:\n \"\"\"Initialize the bingo card with its lines.\n\n Args:\n lines (List[str]): lines of the bingo card\n\n \"\"\"\n self.rows: List[List[int]] = []\n for line in lines:\n self.rows.append([int(num) for num in line.split()])\n\n def mark(self, marker: int) -> None:\n \"\"\"Mark the bingo board with a marker number.\n\n Args:\n marker (int): the called number\n\n \"\"\"\n self.rows = [\n [num if num != marker else self.MARKED for num in row]\n for row in self.rows\n ]\n\n def is_winning(self, numbers: List[int]) -> bool:\n \"\"\"Determine if the sequence of numbers is a win.\n\n Returns:\n bool: the sequence of numbers is summed and only contains marked\n numbers\n\n \"\"\"\n count = Counter(numbers)\n return len(count) == 1 and sum(numbers) == self.MARKED_WIN\n\n def has_won(self) -> bool:\n \"\"\"Check if the bingo card has won.\n\n Returns:\n bool: True if the board is a winner; False otherwise\n\n \"\"\"\n won_row = any([self.is_winning(row) for row in self.rows])\n cols = [\n [row[n] for row in self.rows]\n for n, _ in enumerate(self.rows[0])\n ]\n won_col = any([self.is_winning(col) for col in cols])\n\n # RIP my reading comprehension\n # diag_l = []\n # diag_r = []\n # for n in range(self.WIDTH):\n # diag_l.append(self.rows[n][n])\n # diag_r.append(self.rows[n][self.WIDTH - n - 1])\n # won_diag = any(\n # 
[self.is_winning(diag_l), self.is_winning(diag_r)]\n # )\n\n return any([won_row, won_col])\n\n def sum_all_unmarked(self) -> int:\n \"\"\"Sum all unmarked bingo numbers.\n\n Returns:\n int: sum of all unmarked numbers\n\n \"\"\"\n rows = [[n for n in row if n != self.MARKED] for row in self.rows]\n return sum([sum(row) for row in rows])\n", "sub_path": "2021/04/bingo.py", "file_name": "bingo.py", "file_ext": "py", "file_size_in_byte": 2220, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "typing.List", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 35, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 43, "usage_type": "call"}]} {"seq_id": "590716937", "text": "\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\ndef target(w, x, y): #target function of log likehood method\n ret = 0\n n = len(x)\n for i in range(0,n):\n #print(x[i], w)\n rate = w.dot(x[i])\n #print(w, x[i], rate)\n ret += -rate*y[i] + math.log(1+math.exp(rate))\n return ret\npass\ndef test(w, testx, testy): #test and return accuracy\n n = len(testx)\n sum = 0\n for i in range(0,n):\n rate = w.dot(testx[i])\n #print(\"here\",w, testx[i], rate)\n activation = 1./(math.exp(-rate)+1.)\n #print (rate)\n #print (\"act\", activation)\n if activation < 0.5 and testy[i] == 0 or (activation > 0.5 and testy[i] == 1):\n sum += 1\n elif activation == 0.5:\n sum += 0.5\n return sum/n\npass\ndef newton(weight, trainx, trainy):#Newton optimization of log likehood\n cur = 0\n n = len(trainx[0])\n while 1:\n old = cur\n #print(weight, trainx, trainy)\n cur = target(weight, trainx, trainy)\n print(cur, old)\n if abs(old-cur) <= 0.000001:\n break\n dl = np.matrix(np.zeros(n)).astype(float)\n ddl = np.zeros((n,n)).astype(float)\n #print (n,dl, ddl)\n for i in range(0,len(trainx)):\n rate = weight.dot(trainx[i])\n #print(weight, trainx[i], weight.dot(trainx[i]))\n p = 1. 
- 1./(1.+math.exp(rate))\n #print(\"p=\", p)\n #print(dl)\n dl += (-trainy[i]+p)*trainx[i]\n #print (i, np.matrix(trainx[i]).T, trainx[i], np.matrix(trainx[i]).T*trainx[i])\n ddl += np.matrix(trainx[i]).T*trainx[i]*p*(1-p)\n #print (\"dl,ddl=\",dl, np.linalg.pinv(ddl),dl*np.linalg.pinv(ddl))\n weight -= dl*np.linalg.pinv(ddl)\n #print(cur, weight)\n #print(test(weight, testx, testy))\n return weight\npass\ndef kFoldeCrossValidation(k, data, y):\n n = len(data[0])\n weight_final = np.zeros(n).astype(float)\n weight_final = np.matrix(weight_final)\n for it in range(0,k):\n weight = np.zeros(n).astype(float)#initialize train data and test data\n weight = np.matrix(weight)\n trainx = data[:math.floor(n*it/k)] + data[math.floor(n*(it+1)/k):]\n trainy = y[:math.floor(n*it/k)] + y[math.floor(n*(it+1)/k):]\n testx = data[math.floor(n*it/k):math.floor(n*(it+1)/k)]\n testy = y[math.floor(n*it/k):math.floor(n*(it+1)/k)]\n \n testx = np.array(testx).astype(float)#transfer data into array structure\n testy = np.array(testy).astype(int)\n trainx = np.array(trainx).astype(float)\n trainy = np.array(trainy).astype(int)\n #print(trainx, trainy)\n weight = newton(weight, trainx, trainy)\n\n weight_final += weight\n weight_final /= k\n data = np.array(data).astype(float)\n y = np.array(y).astype(int)\n print(test(weight_final, data, y))\n print(weight_final)\n return weight_final\npass\ndef Virtualization(data, y, weight_final): #Virtualization the scatter plot and draw the classification line,ONLY WORKS IN 2D\n x0,y0,x1,y1 = [],[],[],[]\n data = np.array(data).astype(float)\n y = np.array(y).astype(int)#transfer data into numerical foramt\n for i in range(0,n):#classify two kinds of data\n if y[i] == 1:\n x0.append(data[i][0])\n y0.append(data[i][1])\n else:\n x1.append(data[i][0])\n y1.append(data[i][1])\n fig = plt.figure()\n ax = fig.add_subplot(1,1, 1)\n #print(x0, y0)\n ax.set_title(\"Logic Regression of Melon Example\")\n ax.set_xlabel(\"Density of melons\")\n ax.set_ylabel(\"Sugar content\")\n ax.scatter(x0,y0, label = 'Bad Melons', marker = 'o')#Plot scatter\n ax.scatter(x1,y1, label = 'Good Melons', marker = 's')\n weight_final = np.array(weight_final)\n X = []\n Y = []\n for i in [1,9]:\n X.append(i*0.1)\n Y.append(-(i*0.1*weight_final[0][0] + weight_final[0][2])/weight_final[0][1])\n print (X,Y)\n ax.plot(X,Y, label = 'classification line')#Plot classification line\n ax.legend()\n plt.show() \n\nfile = open(\"data.txt\")\ndata = []\ny = []\nn = 0\nwhile 1:#read data\n line = file.readline().split()\n if not line:\n break\n pass\n #line = line.split(' ')\n k = len(line)\n #print (k, data, y)\n lines = []\n for i in range(1,k-1):\n lines.append(line[i])\n lines.append(1)\n data.append(lines)\n if line[-1] == '0':\n y.append(0)\n else:\n y.append(1)\n n += 1\n\nprint(data, y)\nweight_final = kFoldeCrossValidation(10, data, y)\n\n\nVirtualization(data, y, weight_final)", "sub_path": "Chapter3/3.3/solution.py", "file_name": "solution.py", "file_ext": "py", "file_size_in_byte": 4595, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "math.log", "line_number": 13, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 13, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 42, "usage_type": "call"}, 
{"api_name": "math.exp", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 65, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 66, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 67, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 68, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}]} {"seq_id": "482229991", "text": "from validate import validate_article_row\nfrom model.articles import Articles\nfrom peewee import IntegrityError\nfrom logger import logging\nfrom datetime import datetime\n\n\nclass Scraper:\n def __init__(self, name):\n self.name = name\n\n def fetch(self):\n raise NotImplemented\n\n @staticmethod\n def insert_article(article_row):\n validate_article_row(article_row)\n\n # insert into db\n q = Articles.insert(**article_row)\n try:\n q.execute()\n except IntegrityError:\n logging.debug('Skipping duplicate entry: {0}, {1}'.format(article_row['source'], article_row['date_pub']))\n\ntrend_stop_words = []\n", "sub_path": "scraper/article_sources/scraper.py", "file_name": "scraper.py", "file_ext": "py", "file_size_in_byte": 669, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "validate.validate_article_row", "line_number": 17, "usage_type": "call"}, {"api_name": "model.articles.Articles.insert", "line_number": 20, "usage_type": "call"}, {"api_name": "model.articles.Articles", "line_number": 20, "usage_type": "name"}, {"api_name": "peewee.IntegrityError", "line_number": 23, "usage_type": "name"}, {"api_name": "logger.logging.debug", "line_number": 24, "usage_type": "call"}, {"api_name": "logger.logging", "line_number": 24, "usage_type": "name"}]} {"seq_id": "121893039", "text": "import requests\nimport json\nfrom queue import Queue\nimport os\nimport time\nfrom lxml import etree\nimport threading\nimport pymongo\nimport urllib3\nfrom retrying import retry\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\nimport re\n\n\n\nclass SinaNews:\n def __init__(self):\n self.headers = {\n \"Accept\": \"*/*\",\n 
\"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Host\": \"feed.mix.sina.com.cn\",\n \"Proxy-Connection\": \"keep-alive\",\n \"Referer\": \"http://tech.sina.com.cn/chuangshiji/\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36\",\n }\n self.details_headers = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Cache-Control\": \"max-age=0\",\n \"Connection\": \"keep-alive\",\n \"Host\": \"tech.sina.com.cn\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36\"\n }\n self.proxies = {\n \"https\": \"120.24.68.197:16818\",\n \"http\": \"120.24.68.197:16818\"\n }\n self.MONGO_URL = \"192.168.0.8\"\n self.MONGO_PORT = 27017\n self.MONGO_DB = \"sina_news\"\n self.MONGO_COLLECTION = \"sina_news_list\"\n self.client = pymongo.MongoClient(self.MONGO_URL)\n self.db = self.client[self.MONGO_DB]\n self.coll = self.db[self.MONGO_COLLECTION]\n self.url_queue = Queue()\n self.cy_url_queue = Queue()\n self.content = Queue()\n self.cy_content = Queue()\n self.data = Queue()\n self.image_news_queue = Queue()\n\n @retry(stop_max_attempt_number=4)\n def _parse_url(self, url, headers, return_str=False):\n r = requests.get(url, headers=headers, proxies=self.proxies, timeout=5)\n # assert r.status_code == 200\n return r.content.decode(\"utf-8\")\n\n # @retry(stop_max_attempt_number=5)\n def get_news_list(self):\n for i in range(1):\n start_url = \"http://feed.mix.sina.com.cn/api/roll/get?pageid=223&lid=23&num=50&versionNumber=1.2.8&page{}\"\n response = self._parse_url(start_url.format(i), headers=self.headers)\n item_url_dict = json.loads(response)[\"result\"][\"data\"]\n\n for k in item_url_dict:\n # print(k)\n details_url = (k[\"url\"]).replace('[\"', \"\").replace('\"]', \"\").replace('\\\\', \"\")\n if \"internet\" not in details_url or \"http://slide.tech.sina.com.cn/\" not in details_url:\n self.url_queue.put(details_url)\n\n # @retry(stop_max_attempt_number=5)\n def get_details_page(self):\n \"\"\"chuang shi ji \"\"\"\n while True:\n page_url = self.url_queue.get()\n print(page_url)\n if \"csj\" in page_url:\n # print(page_url)\n html = self._parse_url(url=page_url, headers=self.details_headers)\n print(page_url)\n time.sleep(1)\n self.content.put(html)\n else:\n # print(page_url)\n html = self._parse_url(url=page_url, headers=self.details_headers)\n print(page_url)\n time.sleep(1)\n self.cy_content.put(html)\n\n self.url_queue.task_done()\n\n def parse_cy_item(self):\n while True:\n doc = self.cy_content.get()\n text = etree.HTML(doc)\n news_title = text.xpath('//div[@class=\"second-title\"]/text()') if text.xpath(\n '//div[@class=\"second-title\"]/text()') else None\n if news_title is not None:\n news_title = news_title[0]\n\n news_date = text.xpath('//div[@class=\"date-source\"]/span[1]/text()') if text.xpath(\n '//div[@class=\"date-source\"]/span[1]/text()') else None\n if news_date is not None:\n news_date = news_date[0]\n\n news_author = text.xpath('//div[@class=\"date-source\"]/span[2]/text()') if text.xpath(\n '//div[@class=\"date-source\"]/span[2]/text()') else None\n if news_author is not None:\n news_author = news_author[0]\n else:\n news_author = text.xpath('//div[@class=\"date-source\"]/span[2]/a/text()') if 
text.xpath(\n '//div[@class=\"date-source\"]/span[2]/a/text()') else None\n if news_author is not None:\n news_author = news_author[0]\n\n news_tag = text.xpath('//div[@id=\"keywords\"]/a/text()') if text.xpath(\n '//div[@id=\"keywords\"]/a/text()') else []\n\n news_url = text.xpath('//meta[@property=\"toutiao\"]/@content') if text.xpath(\n '//meta[@property=\"toutiao\"]/@content') else None\n if news_url is not None:\n news_url = news_url[0]\n\n news_cate = text.xpath('//div[@class=\"path\"]//span[@class=\"spliter\"]/a/text()') if text.xpath(\n '//div[@class=\"path\"]//span[@class=\"spliter\"]/a/text()') else None\n if news_cate is not None:\n news_cate = news_cate[0]\n\n news_content = text.xpath('//div[@id=\"artibody\"]') if text.xpath('//div[@id=\"artibody\"]') else None\n if news_content is not None:\n news_content = news_content[0]\n content = etree.tostring(news_content, encoding=\"utf-8\").decode(\"utf-8\")\n\n data = {\n \"title\": news_title,\n \"time\": news_date,\n \"categories\": news_cate,\n \"url\": news_url,\n \"author\": news_author,\n \"tag\": news_tag,\n \"content\": content,\n }\n self.data.put(data)\n self.cy_content.task_done()\n\n def parse_item(self):\n \"\"\"\n 1. ๅˆ›ไบ‹่ฎฐ้กต้ข่งฃๆž\n :return:\n \"\"\"\n while True:\n doc = self.content.get()\n text = etree.HTML(doc)\n news_title = text.xpath('//h1[@id=\"artibodyTitle\"]/text()') if text.xpath(\n '//h1[@id=\"artibodyTitle\"]/text()') else None\n if news_title is not None:\n news_title = news_title[0]\n\n news_date = text.xpath('//span[@id=\"pub_date\"]/text()') if text.xpath(\n '//span[@id=\"pub_date\"]/text()') else None\n if news_date is not None:\n news_date = news_date[0].strip()\n\n news_cate = text.xpath('//span[@id=\"media_name\"]/a[1]/text()') if text.xpath(\n '//span[@id=\"media_name\"]/a[1]/text()') else None\n if news_cate is not None:\n news_cate = news_cate[0]\n\n news_author = text.xpath('//span[@id=\"author_ename\"]/a/text()') if text.xpath(\n '//span[@id=\"author_ename\"]/a/text()') else None\n if news_author is not None:\n news_author = news_author[0]\n\n news_tag = text.xpath('//p[@class=\"art_keywords\"]/a/text()') if text.xpath(\n '//p[@class=\"art_keywords\"]/a/text()') else None\n if news_tag is not None:\n news_tag = news_tag\n\n news_url = text.xpath('//div[@class=\"blk_tw\"]/h3/a/@href') if text.xpath(\n '//div[@class=\"blk_tw\"]/h3/a/@href') else None\n if news_url is not None:\n news_url = news_url[0]\n else:\n news_url = text.xpath('//meta[@property=\"toutiao\"]/@content') if text.xpath(\n '//meta[@property=\"toutiao\"]/@content') else None\n if news_url is not None:\n news_url = news_url[0]\n\n news_content = text.xpath('//div[@id=\"artibody\"]') if text.xpath('//div[@id=\"artibody\"]') else None\n if news_content is not None:\n news_content = news_content[0]\n content = etree.tostring(news_content, encoding=\"utf-8\").decode(\"utf-8\")\n\n data = {\n \"title\": news_title,\n \"time\": news_date,\n \"categories\": news_cate,\n \"url\": news_url,\n \"author\": news_author,\n \"tag\": news_tag,\n \"content\": content,\n }\n self.data.put(data)\n self.content.task_done()\n\n def save_data(self):\n while True:\n data = self.data.get()\n try:\n if self.db[self.MONGO_COLLECTION].insert(data):\n print('ๅญ˜ๅ‚จๅˆฐMongoDBๆˆๅŠŸ')\n except Exception as e:\n print(e)\n print('ๅญ˜ๅ‚จๅˆฐMongoDBๅคฑ่ดฅ')\n self.data.task_done()\n\n def run(self):\n self.get_news_list()\n\n thread_list = []\n for i in range(1):\n t2 = threading.Thread(target=self.get_details_page)\n thread_list.append(t2)\n\n # for i 
in range(1):\n        # t3 = threading.Thread(target=self.parse_item)\n        # thread_list.append(t3)\n        #\n        # for i in range(1):\n        # t4 = threading.Thread(target=self.parse_cy_item)\n        # thread_list.append(t4)\n        #\n        # for i in range(1):\n        # t5 = threading.Thread(target=self.save_data)\n        # thread_list.append(t5)\n        #\n        # for t in thread_list:\n        # t.setDaemon(True)  # set as a daemon thread, meaning it is not essential: when the main thread ends, the child thread ends\n        # t.start()  # start the thread\n        #\n        # for q in [self.url_queue, self.content, self.data]:\n        # q.join()  # make the main thread wait; it only finishes once each queue's counter reaches 0, otherwise it keeps waiting\n\n\nif __name__ == '__main__':\n    sn = SinaNews()\n    sn.run()\n", "sub_path": "sina_news/tele_spider.py", "file_name": "tele_spider.py", "file_ext": "py", "file_size_in_byte": 9949, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "urllib3.disable_warnings", "line_number": 11, "usage_type": "call"}, {"api_name": "urllib3.exceptions", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 45, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 48, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 49, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 50, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 51, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 52, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 53, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 57, "usage_type": "call"}, {"api_name": "retrying.retry", "line_number": 55, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 66, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 84, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 90, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 98, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 98, "usage_type": "name"}, {"api_name": "lxml.etree.tostring", "line_number": 135, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 135, "usage_type": "name"}, {"api_name": "lxml.etree.HTML", "line_number": 156, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 156, "usage_type": "name"}, {"api_name": "lxml.etree.tostring", "line_number": 195, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 195, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 225, "usage_type": "call"}]} {"seq_id": "529808265", "text": "from autoslug import AutoSlugField\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.utils.translation import ugettext_lazy as _\nfrom tinymce.models import HTMLField\n\nfrom raporty_siecobywatelska_pl.ranking.models import Ranking\n\n\n@python_2_unicode_compatible\nclass Group(models.Model):\n    name = models.CharField(\n        max_length=250,\n        verbose_name=_(\"Name\")\n    )\n    description = HTMLField(\n        verbose_name=_(\"Description\")\n    )\n    slug = AutoSlugField(\n        populate_from='name',\n        verbose_name=_(\"Slug\"),\n        unique=True\n    )\n    ranking = models.ForeignKey(\n        to=Ranking,\n        on_delete=models.CASCADE\n    )\n\n    class Meta:\n        verbose_name = _(\"Question group\")\n        verbose_name_plural = _(\"Question groups\")\n        ordering = ['name']\n\n    def get_absolute_url(self):\n        return reverse('questionnaire:ranking-group-detail', 
args=[self.ranking.slug, self.slug])\n\n def __str__(self):\n return self.name\n\n\n@python_2_unicode_compatible\nclass Question(models.Model):\n name = models.CharField(max_length=250, verbose_name=_(\"Name\"))\n description = HTMLField(verbose_name=_(\"Description\"))\n\n group = models.ForeignKey(Group, on_delete=models.CASCADE)\n\n class Meta:\n verbose_name = _(\"Question\")\n verbose_name_plural = _(\"Question\")\n ordering = ['name']\n\n def get_absolute_url(self):\n return reverse('rankings:detail', kwargs={'slug': self.group.ranking.slug})\n\n def __str__(self):\n return self.name\n", "sub_path": "raporty_siecobywatelska_pl/questionnaire/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1617, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "django.db.models.Model", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 15, "usage_type": "call"}, {"api_name": "tinymce.models.HTMLField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 18, "usage_type": "call"}, {"api_name": "autoslug.AutoSlugField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "raporty_siecobywatelska_pl.ranking.models.Ranking", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 31, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 32, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 36, "usage_type": "call"}, {"api_name": "django.utils.encoding.python_2_unicode_compatible", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 43, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 44, "usage_type": "call"}, {"api_name": "tinymce.models.HTMLField", "line_number": 45, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 50, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 51, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 
55, "usage_type": "call"}, {"api_name": "django.utils.encoding.python_2_unicode_compatible", "line_number": 42, "usage_type": "name"}]} {"seq_id": "613868575", "text": "#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\"\"\" A Tank Game to teach coding those unfamiliar with it\"\"\"\r\n\r\nimport pygame\r\nimport sys\r\nfrom pygame.locals import *\r\nfrom PIL import Image\r\nimport random\r\nimport time\r\n\r\npygame.init()\r\n\r\n# Set the width and height of the screen\r\n\r\ninfoObject = pygame.display.Info()\r\nsize = (infoObject.current_w, infoObject.current_h - 30)\r\n\r\n# Used to manage how fast the screen updates\r\n\r\nclock = pygame.time.Clock()\r\nclass tank:\r\n\r\n def __init__(self):\r\n self.origional_image = pygame.image.load('KV-2_Tank.png')\r\n self.image = self.origional_image\r\n self.rotation = 0\r\n self.coords = (0, 0)\r\n self.move_count = 0 # # Counts the number of tims a function is iterated\r\n\r\n def rotate(self, degrees):\r\n \"\"\" This program will rotate 1 degree everytime called, untill it has\r\n reached the desired rotation, then it will advance the program counter\r\n \"\"\"\r\n\r\n global program_counter\r\n\r\n if degrees > 0 and self.move_count < degrees:\r\n self.rotation += 1\r\n self.move_count += 1\r\n elif degrees < 0 and self.move_count > degrees:\r\n self.rotation -= 1\r\n self.move_count -= 1\r\n else:\r\n self.move_count = 0\r\n program_counter += 1\r\n\r\n # # Converting large negetive rotations to the corresponding small positive ones\r\n\r\n if abs(self.rotation) > 180:\r\n if self.rotation < 0:\r\n self.rotation = 360 + self.rotation\r\n elif self.rotation > 0:\r\n self.rotation = -360 + self.rotation\r\n\r\n def move(self, distance):\r\n \"\"\" This program will move 10 pixels everytime called, untill it has\r\n reached the desired position, then it will advance the program counter\r\n \"\"\"\r\n\r\n global program_counter\r\n\r\n self.move_count += 1\r\n (x, y) = self.coords\r\n\r\n if distance < 0:\r\n factor = -1\r\n elif distance == 0:\r\n factor = 0\r\n else:\r\n factor = 1\r\n\r\n if self.move_count < abs(distance):\r\n y += factor * -(5 - abs(self.rotation) / 18)\r\n if abs(self.rotation) <= 90:\r\n x -= factor * self.rotation / 18\r\n elif self.rotation > 0:\r\n x -= factor * (180 - self.rotation) / 18\r\n else:\r\n x -= factor * -(180 - abs(self.rotation)) / 18\r\n else:\r\n\r\n program_counter += 1\r\n self.move_count = 0\r\n\r\n self.coords = (x, y)\r\n\r\n def fire(self):\r\n \"\"\" Fires a bullet from the tank\"\"\"\r\n \r\n global program_counter\r\n global bullet_list\r\n x,y = self.coords\r\n bullet_list.append(bullet((x, y- 1/2 * self.image.get_height()), self.rotation))\r\n program_counter += 1\r\n\r\n def delay(self, milliseconds):\r\n \"\"\"Delays the tank's movement\"\"\"\r\n \r\n global program_counter\r\n time.sleep(milliseconds * 0.001)\r\n program_counter += 1\r\n\r\n def draw(self):\r\n \"\"\" Draws the image of the tank on the playing surface\"\"\"\r\n\r\n self.image = _rot_center(self.origional_image, self.rotation)\r\n screen.blit(self.image, self.coords)\r\n\r\n\r\nclass bullet:\r\n\r\n def __init__(self, coordinates, rotation):\r\n self.origional_image = \\\r\n pygame.transform.scale(pygame.image.load('bullet.png'),\r\n (20, 10))\r\n self.image = self.origional_image\r\n self.rotation = rotation\r\n self.coords = coordinates\r\n self.move_count = 0 ## Counts the number of tims a function is iterated\r\n\r\n def move(self, distance):\r\n \"\"\" This program will move 10 pixels everytime called, untill it 
has\r\n reached the desired position, then it will advance the program counter\r\n \"\"\"\r\n\r\n self.move_count += 1\r\n (x, y) = self.coords\r\n\r\n if distance < 0:\r\n factor = -1\r\n elif distance == 0:\r\n factor = 0\r\n else:\r\n factor = 1\r\n\r\n if self.move_count < abs(distance):\r\n y += factor * -(5 - abs(self.rotation) / 18)\r\n if abs(self.rotation) <= 90:\r\n x -= factor * self.rotation / 18\r\n elif self.rotation > 0:\r\n x -= factor * (180 - self.rotation) / 18\r\n else:\r\n x -= factor * -(180 - abs(self.rotation)) / 18\r\n else:\r\n\r\n self.move_count = 0\r\n\r\n self.coords = (x, y)\r\n\r\n def draw(self):\r\n \"\"\" Draws the image of the tank on the playing surface\"\"\"\r\n\r\n self.image = _rot_center(self.origional_image, self.rotation\r\n + 90)\r\n screen.blit(self.image, self.coords)\r\n\r\n\r\ndef _rot_center(pygame_image, degrees):\r\n \"\"\" Rotate an image while keeping its center and size, by converting to pil, rotating and back\"\"\"\r\n\r\n image_string = pygame.image.tostring(pygame_image, 'RGBA', False)\r\n PIL_image = Image.frombytes('RGBA', pygame_image.get_size(),\r\n image_string)\r\n PIL_image = PIL_image.rotate(degrees)\r\n image_string = PIL_image.tobytes()\r\n pygame_image = pygame.image.fromstring(image_string,\r\n PIL_image.size, 'RGBA')\r\n return pygame_image\r\n\r\n\r\ndef get_program(nest):\r\n \"\"\" This function gets the tank program from the user\"\"\"\r\n\r\n user_in = True\r\n program = []\r\n\r\n # # Gets program from user\r\n\r\n while user_in:\r\n user_in = input('>>> ' + nest * ' ')\r\n if 'tank.' in user_in:\r\n user_in = 'tank_program.append(\"' + user_in + '\")'\r\n\r\n # # If a conditional, it runs 'get_program' to get the conditional\r\n # # the code under the conditional,and appends it as one string\r\n # # separated by '\\n' to the program list. It counts the number of\r\n # # 'nests' to print the correct number of tabs.\r\n\r\n if ':' in user_in:\r\n nested_program = get_program(nest + 1)\r\n for command in nested_program:\r\n user_in += '\\n' + (nest + 1) * ' ' + command\r\n program.append(user_in)\r\n return program\r\n\r\n\r\ndef start(program):\r\n \"\"\" This function is called, given the input data, collected from the user\r\n and processed by 'get_program'. Given this list of commands, the computer\r\n moves the tank on the screen.\r\n \"\"\"\r\n\r\n # # Calculating the moves of the Tank. Compiling\r\n\r\n global program_counter\r\n while True:\r\n if program_counter < len(program) - 1:\r\n exec(program[program_counter])\r\n program_counter += 1\r\n else:\r\n break\r\n\r\n # # Executing the moves of the tank. 
Interpreting\r\n\r\n program_counter = 0\r\n while True:\r\n if program_counter < len(tank_program):\r\n print(tank_program[program_counter])\r\n exec(tank_program[program_counter])\r\n screen.fill((255, 255, 255))\r\n for bullet in bullet_list:\r\n bullet.move(10)\r\n bullet.draw()\r\n \r\n tank.draw()\r\n pygame.display.flip()\r\n time.sleep(0.001)\r\n clock.tick(60)\r\n\r\n\r\nprogram = get_program(0)\r\ntank_program = []\r\nprogram_counter = 0\r\ntank = tank()\r\nbullet_list = []\r\n\r\nscreen = pygame.display.set_mode(size) # , FULLSCREEN)\r\nstart(program)\r\n# Close the window and quit.\r\npygame.quit()\r\n", "sub_path": "Tank Game/Outdated/Tank Game compiled.py", "file_name": "Tank Game compiled.py", "file_ext": "py", "file_size_in_byte": 7414, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "pygame.init", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.display.Info", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 25, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 101, "usage_type": "call"}, {"api_name": "pygame.transform.scale", "line_number": 115, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 115, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pygame.image.tostring", "line_number": 162, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 162, "usage_type": "attribute"}, {"api_name": "PIL.Image.frombytes", "line_number": 163, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 163, "usage_type": "name"}, {"api_name": "pygame.image.fromstring", "line_number": 167, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 167, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 227, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 227, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 228, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 238, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 238, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 241, "usage_type": "call"}]} {"seq_id": "67749111", "text": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nimport cfnlint.helpers\n\n\nclass Exclusive(CloudFormationLintRule):\n \"\"\"Check Properties Resource Configuration\"\"\"\n id = 'E2520'\n shortdesc = 'Check Properties that are mutually exclusive'\n description = 'Making sure CloudFormation properties ' + \\\n 'that are exclusive are not defined'\n tags = ['base', 'resources']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n self.exlusivespec = cfnlint.helpers.load_resources('data/ResourcePropertiesExclusive.json')\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = list()\n\n for excl_type, excl_values in self.exlusivespec.items():\n for res_name, res_value in cfn.get_resources(excl_type).items():\n for excl_name, excl_value in excl_values.items():\n properties = res_value.get('Properties', {})\n if excl_name in properties:\n for prop_name in excl_value:\n if prop_name in res_value['Properties']:\n message = \"Parameter {0} shouldn't exist with {1} for {2}\"\n matches.append(RuleMatch(\n ['Resources', res_name, 'Properties', excl_name],\n message.format(excl_name, prop_name, res_name)\n ))\n\n return matches\n", "sub_path": "src/cfnlint/rules/resources/properties/Exclusive.py", "file_name": "Exclusive.py", "file_ext": "py", "file_size_in_byte": 2446, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "cfnlint.CloudFormationLintRule", "line_number": 22, "usage_type": "name"}, {"api_name": "cfnlint.helpers.load_resources", "line_number": 32, "usage_type": "call"}, {"api_name": "cfnlint.helpers", "line_number": 32, "usage_type": "attribute"}, {"api_name": "cfnlint.RuleMatch", "line_number": 46, "usage_type": "call"}]} {"seq_id": "102395242", "text": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (C) 2013-2016 Vertel AB \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\nimport logging\nfrom odoo import api, models, fields, _\nfrom .skv import SKVTransaktionsrapportType1 as Parser\nimport base64\nimport re\n\nfrom datetime import timedelta\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass AccountBankStatementImport(models.TransientModel):\n    \"\"\"Add seb method to account.bank.statement.import.\"\"\"\n    _inherit = 'account.statement.import'\n\n    @api.model\n    def _parse_file(self, statement_file):\n        \"\"\"Parse one file or multiple files from zip-file.\n        Return array of statements for further processing.\n        xlsx-files are a Zip-file, have to override\n        \"\"\"\n        statements = []\n        files = [statement_file]\n\n        try:\n            _logger.info(u\"Try parsing with SEB Typ 1 Kontohändelser.\")\n            parser = Parser(base64.b64decode(self.statement_file))\n        except ValueError:\n            # Not a SEB Type 1 file, returning super will call next candidate:\n            _logger.info(u\"Statement file was not a SEB Type 1 Kontohändelse file.\")\n            try:\n                _logger.info(u\"Try parsing with SEB Typ 2 Kontohändelser.\")\n                parser = Parser2(base64.b64decode(self.statement_file))\n            except ValueError:\n                # Not a SEB Type 2 file, returning super will call next candidate:\n                _logger.info(u\"Statement file was not a SEB Type 2 Kontohändelse file.\")\n                try:\n                    _logger.info(u\"Try parsing with SEB Typ 3 Kontohändelser.\")\n                    parser = Parser3(base64.b64decode(self.statement_file))\n                except ValueError:\n                    # Not a SEB Type 3 file, returning super will call next candidate:\n                    _logger.info(u\"Statement file was not a SEB Type 3 Kontohändelse file.\")\n                    return super(AccountBankStatementImport, self)._parse_file(statement_file)\n\n        fakt = re.compile('\\\d+') # Pattern to find invoice numbers\n\n\n\n        seb = parser.parse()\n        for s in seb.statements:\n            currency = self.env['res.currency'].search([('name','=',s['currency_code'])])\n            account = self.env['res.partner.bank'].search([('acc_number','=',s['account_number'])]).mapped('journal_id').mapped('default_debit_account_id')\n            move_line_ids = []\n            for t in s['transactions']:\n                if s['currency_code'] != currency.name:\n                    t['currency_id'] = currency.id\n                partner_id = self.env['res.partner'].search(['|',('name','ilike',t['partner_name']),('ref','ilike',t['partner_name'])]) # ,('ref','ilike',t['partner_name']),('phone','ilike',t['partner_name'])])\n                if partner_id:\n                    t['account_number'] = partner_id[0].commercial_partner_id.bank_ids and partner_id[0].commercial_partner_id.bank_ids[0].acc_number or ''\n                    t['partner_id'] = partner_id[0].commercial_partner_id.id\n                fnr = '-'.join(fakt.findall(t['name']))\n                invoice = None\n                #~ if fnr:\n                #~ invoice = self.env['account.invoice'].search(['|',('name','ilike',fnr),('supplier_invoice_number','ilike',fnr)])\n                #~ if invoice:\n                #~ t['account_number'] = invoice[0] and invoice[0].partner_id.bank_ids and invoice[0].partner_id.bank_ids[0].acc_number or ''\n                #~ t['partner_id'] = invoice[0] and invoice[0].partner_id.id or None\n                # account.voucher / account.move t['journal_entry_id']\n                d1 = fields.Date.to_string(fields.Date.from_string(t['date']) - timedelta(days=5))\n                d2 = fields.Date.to_string(fields.Date.from_string(t['date']) + timedelta(days=40))\n                vouchers = self.env['account.voucher'].search([('date','>',d1),('date','<',d2), ('account_id', '=', account.id)])\n                voucher = None\n                if len(vouchers) > 0:\n                    voucher_partner = vouchers.filtered(lambda v: v.partner_id == partner_id and round(v.amount, -1) == round(t['amount'], -1))\n                    if len(voucher_partner) > 0:\n                        
voucher = voucher_partner[0]\n else:\n voucher = vouchers.filtered(lambda v: round(v.amount, -1) == round(t['amount'], -1))[0] if vouchers.filtered(lambda v: round(v.amount, -1) == round(t['amount'], -1)) else None\n if not invoice or not voucher: # match with account.move\n #~ lines = self.env['account.move'].search([('date','>',d1),('date','<',d2)]).filtered(lambda v: round(v.amount,-1) == round(t['amount'],-1)).mapped('line_id').filtered(lambda l: l.account_id.id == account and (account.id))\n line = self.env['account.move'].search([('date','>',d1),('date','<',d2)]).mapped('line_ids').filtered(lambda l: l.account_id == account and round(l.debit-l.credit, -1) == round(t['amount'], -1))\n if len(line)>0:\n #~ _logger.error(line.mapped('move_id'))\n #~ _logger.error(account.mapped('code'))\n if line[0].move_id.state == 'draft' and line[0].move_id.date != t['date']:\n line[0].move_id.date = t['date']\n move = line.mapped('move_id')[0] if len(line)>0 else None\n if move:\n t['journal_entry_id'] = move.id\n for line in move.line_ids:\n move_line_ids.append(line)\n t['voucher_id'] = self.env['account.voucher'].search([('move_id', '=', move.id)]).id if self.env['account.voucher'].search([('move_id', '=', move.id)]) else None\n elif voucher: # match with account.voucher\n if voucher.move_id.state == 'draft' and voucher.move_id.date != t['date']:\n voucher.move_id.date = t['date']\n if voucher.state == 'draft' and voucher.date != t['date']:\n voucher.date = t['date']\n t['journal_entry_id'] = voucher.move_id.id\n for line in voucher.move_id.line_ids:\n move_line_ids.append(line)\n t['voucher_id'] = voucher.id\n elif invoice: # match with account.invoice\n line = invoice.payment_ids.filtered(lambda l: l.date > d1 and l.date < d2 and round(l.debit-l.credit, -1) == round(t['amount'], -1))\n if len(line) > 0:\n if line[0].move_id.state == 'draft' and line[0].move_id.date != t['date']:\n line[0].move_id.date = t['date']\n t['journal_entry_id'] = line[0].move_id.id\n for line in line[0].move_id.line_ids:\n move_line_ids.append(line)\n s['move_line_ids'] = [(6, 0, [l.id for l in move_line_ids])]\n\n #~ res = parser.parse(statement_file)\n _logger.debug(\"res: %s\" % seb.statements)\n #~ raise Warning(seb.statements)\n return seb.account_currency, seb.account_number, seb.statements\n\nclass account_bank_statement(models.Model):\n _inherit = 'account.bank.statement.line'\n\n def Xget_move_lines_for_reconciliation_by_statement_line_id(self, st_line_id, excluded_ids=None, str=False, offset=0, limit=None, count=False, additional_domain=None):\n \"\"\" Bridge between the web client reconciliation widget and get_move_lines_for_reconciliation (which expects a browse record) \"\"\"\n if excluded_ids is None:\n excluded_ids = []\n if additional_domain is None:\n additional_domain = []\n st_line = self.browse(st_line_id)\n return self.get_move_lines_for_reconciliation(st_line, excluded_ids, str, offset, limit, count, additional_domain)\n\n def Xget_move_lines_for_reconciliation(self, st_line, excluded_ids=None, str=False, offset=0, limit=None, count=False, additional_domain=None):\n \"\"\" Find the move lines that could be used to reconcile a statement line. 
If count is true, only returns the count.\n\n :param st_line: the browse record of the statement line\n :param integers list excluded_ids: ids of move lines that should not be fetched\n :param boolean count: just return the number of records\n :param tuples list additional_domain: additional domain restrictions\n \"\"\"\n _logger.warn('st_line: %s' % st_line)\n _logger.warn('st_line.name: %s' % st_line.name)\n _logger.warn('st_line.currency_id: %s' % st_line.currency_id)\n _logger.warn('excluded_ids: %s' % excluded_ids)\n _logger.warn('str: %s' % str)\n _logger.warn('limit: %s' % limit)\n _logger.warn('additional_domain: %s' % additional_domain)\n mv_line_pool = self.pool.get('account.move.line')\n domain = self._domain_move_lines_for_reconciliation(st_line, excluded_ids=excluded_ids, str=str, additional_domain=additional_domain)\n _logger.warn('domain: %s' % domain)\n # Get move lines ; in case of a partial reconciliation, only keep one line (the first whose amount is greater than\n # the residual amount because it is presumably the invoice, which is the relevant item in this situation)\n filtered_lines = []\n reconcile_partial_ids = []\n actual_offset = offset\n while True:\n line_ids = mv_line_pool.search(domain, offset=actual_offset, limit=limit, order=\"date_maturity asc, id asc\")\n lines = mv_line_pool.browse(line_ids)\n make_one_more_loop = False\n for line in lines:\n if line.reconcile_partial_id and \\\n (line.reconcile_partial_id.id in reconcile_partial_ids or \\\n abs(line.debit - line.credit) < abs(line.amount_residual)):\n #if we filtered a line because it is partially reconciled with an already selected line, we must do one more loop\n #in order to get the right number of items in the pager\n make_one_more_loop = True\n continue\n filtered_lines.append(line)\n if line.reconcile_partial_id:\n reconcile_partial_ids.append(line.reconcile_partial_id.id)\n\n if not limit or not make_one_more_loop or len(filtered_lines) >= limit:\n break\n actual_offset = actual_offset + limit\n lines = limit and filtered_lines[:limit] or filtered_lines\n\n # Either return number of lines\n if count:\n return len(lines)\n\n # Or return list of dicts representing the formatted move lines\n else:\n target_currency = st_line.currency_id or st_line.journal_id.currency or st_line.journal_id.company_id.currency_id\n\n mv_lines = mv_line_pool.prepare_move_lines_for_reconciliation_widget(lines, target_currency=target_currency, target_date=st_line.date)\n #~ raise Warning(target_currency.name,mv_lines)\n has_no_partner = not bool(st_line.partner_id.id)\n for line in mv_lines:\n line['has_no_partner'] = has_no_partner\n #~ from pprint import PrettyPrinter\n #~ _logger.warn('\\nmv_lines:\\n%s' % PrettyPrinter(indent=4).pformat(mv_lines))\n return mv_lines\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n", "sub_path": "l10n_se_skv/account_bank_statement_import.py", "file_name": "account_bank_statement_import.py", "file_ext": "py", "file_size_in_byte": 12383, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.getLogger", "line_number": 29, "usage_type": "call"}, {"api_name": "odoo.models.TransientModel", "line_number": 32, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 32, "usage_type": "name"}, {"api_name": "skv.SKVTransaktionsrapportType1", "line_number": 47, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 47, "usage_type": "call"}, {"api_name": 
"base64.b64decode", "line_number": 53, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 59, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 65, "usage_type": "call"}, {"api_name": "odoo.fields.Date.to_string", "line_number": 89, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 89, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 89, "usage_type": "name"}, {"api_name": "odoo.fields.Date.from_string", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 89, "usage_type": "call"}, {"api_name": "odoo.fields.Date.to_string", "line_number": 90, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 90, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 90, "usage_type": "name"}, {"api_name": "odoo.fields.Date.from_string", "line_number": 90, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 90, "usage_type": "call"}, {"api_name": "odoo.api.model", "line_number": 36, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 36, "usage_type": "name"}, {"api_name": "odoo.models.Model", "line_number": 137, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 137, "usage_type": "name"}]} {"seq_id": "375866318", "text": "#!/usr/local/bin/python3\n# -*- coding:utf-8 -*-\n# CIS: Commodity Inventory System (ๅ•†ๅ“็›˜็‚น็ณป็ปŸ)\n\nfrom openpyxl import load_workbook\nimport sqlite3\nimport os\nimport time\nimport re\n\n\n\n# ------------------------- ๅ…จๅฑ€ๅ˜้‡ start -------------------------\nDATABASE_PATH = '/Users/Penfree/Workspace/Programs/PythonProjects/Codes/commoditiesImport/commodities.db'\nRT = '' # RECORDS_TABLE_NAME\nMT = 'main' # MAIN_TABLE_NAME\nCONNECT = sqlite3.connect(DATABASE_PATH)\nCURSOR = CONNECT.cursor()\n# ------------------------- ๅ…จๅฑ€ๅ˜้‡ end -------------------------\n\n\n\n# ------------------------- ๅทฅๅ…ท้›† start -------------------------\ndef createTable(t_name):\n ''' ๅˆ›ๅปบ่กจ\n\n t_name: table name, ่กจๅ, ไธ่ƒฝไธบ็ฉบ, ไธบ็ฉบๆ—ถๆŠฅ้”™่ฟ”ๅ›ž\n '''\n log('ๆ–ฐๅปบไฝœไธš่กจ')\n if isinstance(t_name, str) and len(t_name) > 0:\n log('ๆฃ€ๆŸฅไฝœไธš่กจๅ')\n else:\n log('ไฝœไธš่กจๅๆ— ๆ•ˆ')\n return\n #\n sql = \"CREATE TABLE IF NOT EXISTS {} \\\n (idx INTEGER PRIMARY KEY AUTOINCREMENT,\\\n barcode TEXT NOT NULL UNIQUE,\\\n name TEXT NOT NULL,\\\n brief TEXT DEFAULT '',\\\n brand TEXT DEFAULT 'NA',\\\n placeOfOrigin TEXT DEFAULT '่งๅŒ…่ฃ…',\\\n unit TEXT DEFAULT 'PCS',\\\n spec TEXT DEFAULT 'NA',\\\n cate TEXT DEFAULT 'NA',\\\n retailPrice TEXT DEFAULT '0',\\\n shelfLife TEXT DEFAULT '่งๅŒ…่ฃ…',\\\n commission TEXT DEFAULT '0',\\\n points TEXT DEFAULT '1',\\\n discount TEXT DEFAULT '1',\\\n weighed TEXT DEFAULT '0',\\\n settlementType TEXT DEFAULT '2',\\\n viprice TEXT DEFAULT '0',\\\n tradePrice TEXT DEFAULT '0',\\\n storeNum TEXT DEFAULT '0',\\\n cost TEXT DEFAULT '0',\\\n saleOnWechat TEXT DEFAULT '1',\\\n existing TEXT DEFAULT '1');\".format(t_name)\n #\n CURSOR.execute(sql)\n CONNECT.commit()\n\ndef isStr(content):\n ''' ๅˆคๆ–ญๅ†…ๅฎนๆ˜ฏๅฆไธบๆœ‰ๆ•ˆ็š„ๅญ—็ฌฆไธฒ\n 0, ๅฆ: ๅˆ™่ฟ”ๅ›žNone\n 1, ๆ˜ฏ: ๅˆ™่ฟ”ๅ›ž่ขซๅˆคๆ–ญๅฏน่ฑกๆœฌ่บซ\n '''\n if isinstance(content, str):\n return content\n else:\n return\n\ndef log(content):\n ''' ๆŒ‰ๅผ€ๅ‘ๆ–‡ๆกฃๆ ผๅผๆ‰“ๅฐ\n '''\n if isStr(content):\n print('[CIS] {}'.format(content))\n else:\n print('[CIS] ')\n\ndef wLog(content):\n ''' warnings log ่ญฆ็คบๆ—ฅๅฟ—\n '''\n if isStr(content):\n log('**{}**'.format(content))\n \ndef 
uin(placeholder):\n ''' user input ๆ็คบ็”จๆˆท่พ“ๅ…ฅ\n '''\n if isStr(placeholder):\n content = input('[CIS] {} >> '.format(placeholder))\n return content\n else:\n log('่ฏทๆ็คบ็”จๆˆท่พ“ๅ…ฅ็š„ๅ†…ๅฎน')\n return\n\ndef isArray(obj):\n ''' ๅˆคๆ–ญๅฏน่ฑกๆ˜ฏๅฆไธบๆ•ฐ็ป„, ไธๅŒบๅˆ†ๆ˜ฏlist่ฟ˜ๆ˜ฏtuple\n 0, ่ฟ”ๅ›žNone\n 1, ่ฟ”ๅ›ž่ขซๅˆคๆ–ญๅฏน่ฑกๆœฌ่บซ\n '''\n if isinstance(obj, list) or isinstance(obj, tuple):\n return obj\n else:\n return\n\ndef searchSql(k, t):\n ''' ็”Ÿๆˆๆœ็ดขๅ•†ๅ“SQL่ฏญๅฅ\n \n k: keyword ๆœ็ดขๅ…ณ้”ฎๅญ—\n\n t: table ่กจๅ\n '''\n if isStr(k) and isStr(t):\n return 'SELECT * FROM {0} WHERE barcode LIKE \"%{1}%\" OR name LIKE \"%{1}%\"'.format(t, k)\n else:\n log('ๅ‚ๆ•ฐ้”™่ฏฏ')\n return\n\ndef receipt(prompt, filter = r'', required = False):\n ''' ๆŽฅๆ”ถ็”จๆˆท็š„่พ“ๅ…ฅ\n\n prompt: ่พ“ๅ…ฅๅ†…ๅฎนๆ็คบ\n\n filter: ่ฟ‡ๆปค่ง„ๅˆ™(ๆญฃๅˆ™่กจ่พพๅผ)\n\n required: ๆ˜ฏๅฆๅฟ…ๅกซ(ๅฏไธไผ ๅ€ผ, ้ป˜่ฎคไธบ0)\n '''\n if isStr(prompt):\n pass\n else:\n log('่ฏทๅขžๅŠ ็”จๆˆท่พ“ๅ…ฅ็š„ๅ†…ๅฎน')\n \n while True:\n content = uin(prompt)\n if required:\n if 0 == len(content):\n wLog('ๆœฌ้กนๅฟ…ๅกซ')\n continue\n if isStr(filter) and filter:\n # ไธบๆ’ๅ…ฅๆ•ฐๆฎๅบ“ๅฎšๅˆถ, ้™ๅˆถ็”จๆˆท่พ“ๅ…ฅๅ†…ๅฎน\n r = re.search(filter, content)\n if r:\n # ๆœ‰ๅŒน้…็ป“ๆžœๆ—ถ, ่ฟ”ๅ›žๅŒน้…็ป“ๆžœ\n r = r.group()\n return '{}'.format(r)\n elif required:\n # ๆฒกๆœ‰ๅŒน้…ๅˆฐ็ป“ๆžœๆ—ถ, ๆŸฅ็œ‹ๆœฌ้กนๆ˜ฏๅฆไธบๅฟ…ๅกซ้กน, ่‹ฅๆ˜ฏๅˆ™่ฎฉ็”จๆˆท้‡ๆ–ฐ่พ“ๅ…ฅ, ๅฆๅˆ™่ฟ›ๅ…ฅelse:->่ฟ”ๅ›žไธ€ไธช็ฉบๅญ—็ฌฆไธฒ\n log('่พ“ๅ…ฅๆœ‰่ฏฏ, ่ฏทๆฃ€ๆŸฅ!')\n continue\n else:\n return ''\n else:\n # ่ฟ‡ๆปค่ง„ๅˆ™้”™่ฏฏ\n return content\n\ndef convertArrayToStr(commodityInfo):\n ''' ๅฐ†ๆ•ฐ็ป„็š„ๆ‰€ๆœ‰ๅ…ƒ็ด ๆŒ‰้กบๅบๆ‹ผๆŽฅไธบๅญ—็ฌฆไธฒ\n\n commodityInfo: ๅ•†ๅ“ไฟกๆฏ, ๅˆ—่กจ็ฑปๅž‹\n '''\n result = ''\n if isValidCommodity(commodityInfo):\n split = ''\n for v in commodityInfo:\n # ๅฝ“resultไธบ็ฉบๆ—ถ, ไธ้œ€่ฆ้—ด้š”็ฌฆ\n if result and (not split):\n split = ','\n #\n v_upper = str(v).upper()\n if (not str(v)) or 'NULL' == v_upper or 'NONE' == v_upper:\n tempV = 'Null'\n else:\n tempV = '\\\"{}\\\"'.format(str(v))\n result = '{}{}{}'.format(result, split, tempV)\n return result\n else:\n log('ๅ•†ๅ“ไฟกๆฏๆ— ๆ•ˆ')\n return\n\ndef isValidCommodity(commodityInfo):\n ''' ๆŒ‰ๆ ผๅผๅˆคๆ–ญๅ•†ๅ“ไฟกๆฏๆ˜ฏๅฆๆœ‰ๆ•ˆ\n\n commodityInfo: ้•ฟๅบฆไธบ22็š„ๆ•ฐ็ป„\n '''\n if isArray(commodityInfo) and 22 == len(commodityInfo):\n return commodityInfo\n else:\n return\n\n# ------------------------- ๅทฅๅ…ท้›† end -------------------------\n\n\n\n# ------------------------- ๅŠŸ่ƒฝๆจกๅ— start -------------------------\ndef updateMainTable():\n ''' ๆ›ดๆ–ฐไธปๆ•ฐๆฎๅบ“, ่€—ๆ—ถๆ“ไฝœ, ่ฏท่ฐจๆ…Ž่ฐƒ็”จ.\n ไธบๆๅ‡ๆ•ˆ็އ, ๆœฌๆ“ไฝœๅ–ๆถˆ้€่กŒๆฏ”ๅฏนๅ•†ๅ“ไฟกๆฏ, \n ็›ดๆŽฅๆธ…็ฉบๆ•ฐๆฎๅบ“ไธป่กจๅŽๅ†้‡ๆ–ฐ็Œๅ…ฅๆ•ฐๆฎ\n '''\n createTable('main')\n wLog('ๆœฌๆ“ไฝœไผš่ฆ†็›–ๅŒๆญฅๆ•ฐๆฎๅบ“, ๅฏ่ƒฝไผš้€ ๆˆๆ•ฐๆฎไธขๅคฑ, ่‹ฅๆ— ๅฟ…่ฆ่ฏทๅ‹ฟๆ›ดๆ–ฐ!!!')\n update = uin('ๆ˜ฏๅฆๆ›ดๆ–ฐไธปๆ•ฐๆฎๅบ“(yes?):')\n if 'YES' != update.upper():\n log('ไธปๆ•ฐๆฎๅบ“ๆœชๆ›ดๆ–ฐ')\n return\n #\n path = '/Volumes/storage/DataBank/ๅ•†ๅ“ๆ•ฐๆฎ/03ๅ•†ๅ“ๆ€ป็›ฎๅฝ•/้‡่ฆ_ๅœจๅ”ฎๅ•†ๅ“ๆ€ปๆฑ‡.xlsx'\n try:\n # ๆธ…็ฉบๆ•ฐๆฎๅบ“\n CURSOR.execute('DELETE FROM main')\n CONNECT.commit()\n # ๅ‡†ๅค‡ๆ•ฐๆฎ\n wb = load_workbook(path)\n ws = wb[wb.sheetnames[0]]\n # ่ฟ›ๅบฆ\n progress = 1\n for row in ws.iter_rows(min_row = 2, max_col = 20):\n try:\n cvs = '' # cell value series\n for col in row:\n v = col.value\n cvs = \"{}, \\'{}\\'\".format(cvs, v)\n # ไธปๆ•ฐๆฎๅบ“ไธŽexcelๆ–‡ๆกฃๅฐ‘้ฆ–ๅฐพไธคไธชๅญ—ๆฎต(\"็ดขๅผ•(idx)\" ๅ’Œ 
\"ๅœจๆกฃ(existing)\")\n insertSql = \"INSERT INTO main VALUES ({}{}, '1')\".format(progress, cvs)\n #=======================================^่ฟ™้‡Œไธ้œ€่ฆ\",\" cvs้ฆ–ไฝๅญ˜ๅœจไธ€ไธช\",\"\n progress += 1\n #\n CURSOR.execute(insertSql)\n CONNECT.commit()\n except:\n continue\n except FileNotFoundError:\n log('\"้‡่ฆ_ๅœจๅ”ฎๅ•†ๅ“ๆ€ปๆฑ‡.xlsx\"ๆ–‡ไปถไธๅญ˜ๅœจ')\n except:\n log('้”™่ฏฏ, ๆ— ๆณ•ๆ›ดๆ–ฐไธปๆ•ฐๆฎๅบ“')\n\ndef creatRecordsTable():\n ''' ๆฏไธ€ๆฌก่ฟ่กŒๆœฌ็จ‹ๅบ, ้ƒฝๅˆ›ๅปบไธ€ไธชไฝœไธš่กจ็”จไบŽ่ฎฐๅฝ•ๅ•†ๅ“ไฟกๆฏๆ“ไฝœ\n '''\n global RT\n log('ไฝœไธš่กจไธๅญ˜ๅœจ, ๅ‡†ๅค‡ๅˆ›ๅปบ')\n # ไปฅๆ—ถ้—ด็ป„ๆˆไฝœไธš่กจๅ, ๆ ผๅผ:\"ๆœˆ_ๆ—ฅ_ๆ—ถ_ๅˆ†_็ง’\"\n t = time.localtime()\n RT = 't_{:02}{:02}{:02}{:02}{:02}'.\\\n format(t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)\n createTable(RT)\n log('ๅˆ›ๅปบๅฎŒๆˆ')\n # ็”จlogๆ–‡ไปถไฟๅญ˜ๅˆ›ๅปบ่ฟ‡็š„ไฝœไธš่กจๅ\n with open('records.log', 'a') as f:\n f.write(RT + '\\n')\n\ndef query():\n ''' ๅ•†ๅ“ไฟกๆฏๆŸฅ่ฏขๅ…ฅๅฃ\n '''\n while True:\n k = uin('่ฏท่พ“ๅ…ฅๅ•†ๅ“ๅๆˆ–ๆกๅฝข็ ( to exit)')\n if 'Q' == k.upper():\n return\n if not k:\n continue\n else:\n queryInRecords(k)\n\ndef queryInRecords(k):\n ''' ๅœจไฝœไธš่กจไธญๆœๅฏปๅ•†ๅ“ไฟกๆฏ, ๅฝ“ๆŸฅๆ— ็ป“ๆžœๆ—ถ่‡ชๅŠจๆœๅฏปไธป่กจ\n 1, ๅฆ‚ๆžœๅญ˜ๅœจไบŽไธป่กจ, ๅˆ™ๅฐ†่ฏฅๆกไฟกๆฏ็›ดๆŽฅๅคๅˆถๅˆฐไฝœไธš่กจ\n 2, ไธๅญ˜ๅœจไบŽไธป่กจ, ๅˆ™ๅˆ›ๅปบๆ–ฐๅ•†ๅ“ไฟกๆฏ.\n k: keyword ๅ…ณ้”ฎๅญ—\n '''\n log('ๆœๅฏปไฝœไธš่กจ')\n if not RT:\n # ไฝœไธš่กจไธๅญ˜ๅœจ, ๅˆ›ๅปบไฝœไธš่กจ, ๅœจไธป่กจๆœๅฏป\n creatRecordsTable()\n queryInMain(k)\n else:\n # ๅœจไฝœไธš่กจไธญๆœๅฏป\n results = resultsFromDB(k, RT)\n if results:\n r = resultSelector(results)\n showCommodityInfo(r)\n modify = uin('ไฟฎๆ”น, ่ทณ่ฟ‡')\n if 'M' == modify.upper():\n newInfo = updateCommodity(r)\n if isValidCommodity(newInfo):\n CURSOR.execute('DELETE FROM {} WHERE barcode = \"{}\"'.format(RT, r[1]))\n CURSOR.execute('INSERT INTO {} VALUES ({})'.format(RT, convertArrayToStr(newInfo)))\n CONNECT.commit()\n else:\n log('ๆœชๅ‘็Žฐๅ•†ๅ“')\n queryInMain(k)\n\ndef queryInMain(k):\n ''' ๅœจไธป่กจไธญๆœๅฏป, ่‹ฅๅ•†ๅ“ไฟกๆฏๅญ˜ๅœจ, ๅˆ™ๅฐ†ๅ•†ๅ“ไฟกๆฏๅคๅˆถๅˆฐไฝœไธš่กจ\n ่‹ฅไธป่กจไธญไธๅญ˜ๅœจ่ฏฅๅ•†ๅ“ไฟกๆฏ, ๅˆ™ๅˆ›ๅปบ!\n '''\n log('ๆœๅฏปไธป่กจ')\n results = resultsFromDB(k, MT)\n if results:\n r = resultSelector(results)\n if isValidCommodity(r):\n showCommodityInfo(r)\n copyInfoToRecords(r)\n else:\n log('ๆœชๅ‘็Žฐๅ•†ๅ“')\n typedIn = uin('ๆ˜ฏๅฆๆทปๅŠ (ABort ๅ–ๆถˆ)?')\n if 'AB' == typedIn.upper():\n return\n else:\n newCommodity = updateCommodity(None, 1)\n if isValidCommodity(newCommodity):\n CURSOR.execute('INSERT INTO {} VALUES ({})'.format(RT, convertArrayToStr(newCommodity)))\n CONNECT.commit()\n else:\n log('ๆ–ฐๅปบ็š„ๅ•†ๅ“ไฟกๆฏๆœ‰่ฏฏ, ่ฏทๆฃ€ๆŸฅๅŽ้‡่ฏ•!')\n\ndef resultsFromDB(k, tableName = MT):\n ''' ๅœจๆ•ฐๆฎๅบ“ไธญๆŸฅๆ‰พๆ•ฐๆฎ\n\n k: keyword, ๅ…ณ้”ฎๅญ—\n\n tableName: ่กจๅ, ้ป˜่ฎคๆ˜ฏMT(Main Table)\n '''\n if isStr(k) and k and isStr(tableName) and tableName:\n pass\n else:\n log('ๆŸฅ่ฏขๆกไปถๆœ‰่ฏฏ')\n return\n #\n sql = searchSql(k, tableName)\n if sql:\n try:\n CURSOR.execute(sql)\n results = CURSOR.fetchall()\n return results\n except:\n log('ๆŸฅ่ฏขๅ•†ๅ“ไฟกๆฏๅ‡บ้”™')\n return\n else:\n log('ๆŸฅ่ฏขๆกไปถไธๆญฃ็กฎ, ๆ— ๆณ•ๆŸฅ่ฏข')\n return\n \ndef resultSelector(resultsList):\n ''' ๅคšไธช็ป“ๆžœๆ—ถ, ่ฎฉ็”จๆˆท้€‰ๆ‹ฉไธ€ไธช็ป“ๆžœ่ฟ”ๅ›ž\n ๅ•ไธช็ป“ๆžœๆ—ถ, ็›ดๆŽฅ่ฟ”ๅ›ž\n '''\n if not isArray(resultsList):\n return\n if 1 == len(resultsList):\n return resultsList[0]\n #\n for r in resultsList:\n showCommodityInfo(r, resultsList.index(r))\n while True:\n idx = 
uin('่ฏท้€‰ๆ‹ฉๅ•†ๅ“( to exit)')\n if 'q' == idx:\n return\n num = re.search(r'([0-9]+|0+)', idx)\n if num:\n num = int(num.group())\n if num >= len(resultsList):\n log('่ถ…ๅ‡บ่Œƒๅ›ด, ่ฏท้‡ๆ–ฐ้€‰ๆ‹ฉ')\n continue\n else:\n return resultsList[num]\n else:\n log('่พ“ๅ…ฅๆœ‰่ฏฏ, ่ฏท้‡ๆ–ฐ่พ“ๅ…ฅ')\n continue\n \ndef showCommodityInfo(commodityInfo, idx = 0):\n ''' ๅฑ•็คบ็ป“ๆžœ\n '''\n if isValidCommodity(commodityInfo):\n log('{0:02} {1[1]} - {1[2]} - {1[18]} - {1[7]} - {1[10]} - {1[19]} - {1[9]}'.format(idx, commodityInfo))\n\ndef copyInfoToRecords(commodityInfo):\n ''' ไปŽไธป่กจๅคๅˆถๅ•†ๅ“ไฟกๆฏๅˆฐไฝœไธš่กจ\n ๅคๅˆถไน‹ๅ‰่ฏข้—ฎๆ˜ฏๅฆไฟฎๆ”นไฟกๆฏ\n '''\n modify = uin('ไฟฎๆ”น, ่ทณ่ฟ‡')\n if 'M' == modify.upper():\n tempInfo = updateCommodity(commodityInfo)\n else:\n tempInfo = list(commodityInfo)\n tempInfo[0] = 'Null' # ไธบ้ฟๅ…ไธป้”ฎๅ†ฒ็ช้€ ๆˆๅดฉๆบƒ, ่ฎฉๅ…ถ่‡ชๅŒน้…\n #\n if isValidCommodity(tempInfo):\n sql = 'INSERT INTO {} VALUES ({})'.format(RT, convertArrayToStr(tempInfo))\n CURSOR.execute(sql)\n CONNECT.commit()\n\ndef updateCommodity(commodityInfo, method = 0):\n ''' ๆ›ดๆ–ฐๅ•†ๅ“ไฟกๆฏ(ไฟฎๆ”น or ๆ–ฐๅปบ)\n\n mothod: ็ฑปๅž‹\n 0, ไฟฎๆ”นๅ•†ๅ“(้ป˜่ฎคๅ€ผ)\n 1, ๆ–ฐๅปบๅ•†ๅ“\n '''\n newCommodity = []\n if method:\n # ๆ–ฐๅปบๅ•†ๅ“ไฟกๆฏ\n method = True # ไฟฎๆญฃmethodๅ‚ๆ•ฐๅ€ผ็ฑปๅž‹\n newCommodity = ['NULL' for i in range(22)]\n else:\n method = False\n if isValidCommodity(commodityInfo):\n newCommodity = list(commodityInfo)\n else:\n log('ๅ•†ๅ“ไฟกๆฏ้”™่ฏฏ, ๆ— ๆณ•ไฟฎๆ”น')\n return\n #\n name = receipt('ๅ•†ๅ“ๅ', required = method)\n barcode = receipt('ๆกๅฝข็ ', r'\\d{6,16}', required = method)\n spec = receipt('่ง„ๆ ผ')\n shelfLife = receipt('ไฟ่ดจๆœŸ', r'\\d{0,5}')\n cost = receipt('ๆˆๆœฌไปท', r'([1-9]\\d*|0)\\.{0,1}\\d{0,2}')\n retail = receipt('้›ถๅ”ฎไปท', r'([1-9]\\d*|0)\\.{0,1}\\d{0,2}')\n stored = receipt('*** ๅบ“ๅญ˜ ***', r'[1-9]\\d{0,5}')\n\n #\n if name:\n newCommodity[2] = name\n if barcode:\n newCommodity[1] = barcode\n if spec:\n newCommodity[7] = spec\n if shelfLife:\n newCommodity[10] = shelfLife\n if cost:\n newCommodity[19] = cost\n if retail:\n newCommodity[9] = retail\n if stored:\n newCommodity[18] = stored\n\n # ๆ‰‹ๅŠจไฟฎๆ”นไปฅไธ‹ไธคไธชๅ€ผ(ๆ˜ฏๅฆๅพฎๅบ—้”€ๅ”ฎ, ๆ˜ฏๅฆๅญ˜ๅœจmain table)\n # ๅฝ“ๅ•†ๅ“ๅ/ๆกๅฝข็ /ๆˆๆœฌไปท/้›ถๅ”ฎไปท/่ง„ๆ ผไปปๆ„ไธ€ไธชๅ‚ๆ•ฐๆ›ดๆ”น้ƒฝ่ง†ไฝœๆ–ฐไบงๅ“, ้ƒฝๅฐ†'ๆ˜ฏๅฆๅญ˜ๅœจไธป่กจ'ๅ€ผ่ฎพไธบ0.\n if (name or barcode or cost or retail) and ('0' != newCommodity[-1]):\n newCommodity[-1] = '0'\n\n if '1' != newCommodity[-2]:\n newCommodity[-2] = '1'\n return newCommodity\n# ------------------------- ๅŠŸ่ƒฝๆจกๅ— end -------------------------\n\n\n\n# ------------------------- ็จ‹ๅบๅ…ฅๅฃ start -------------------------\ndef main():\n ''' ไธปๅ‡ฝๆ•ฐ, ็จ‹ๅบๅ…ฅๅฃ\n '''\n log('ๆ‚จๅฅฝ, ๆฌข่ฟŽไฝฟ็”จๆœฌ็ณป็ปŸ.')\n updateMainTable()\n query()\n # ไปปๅŠกๅฎŒๆˆๅŽ, ๆ–ญๅผ€ๆ•ฐๆฎๅบ“่ฟžๆŽฅ\n CONNECT.close()\n#\nmain()\n# ------------------------- ็จ‹ๅบๅ…ฅๅฃ end -------------------------", "sub_path": "Codes/commoditiesImport/CIS.py", "file_name": "CIS.py", "file_ext": "py", "file_size_in_byte": 14202, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "sqlite3.connect", "line_number": 17, "usage_type": "call"}, {"api_name": "re.search", "line_number": 142, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 214, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 244, "usage_type": "call"}, {"api_name": "re.search", "line_number": 358, 
"usage_type": "call"}]} {"seq_id": "187076374", "text": "from PIL import Image\n\nopen('ASCIItext.txt', 'w').close()\nfileToWriteTo=open(\"ASCIItext.txt\", \"w\")\n\n#change this to your file name\nfileName=\"received_270635460247420.jpg\"\nimage=Image.open(fileName)\nimageData=image.load()\n\nimageDim=image.size\n\nfor i in range(0,imageDim[1]):\n if i%2==0:\n continue\n\n for j in range(0,imageDim[0]):\n if (imageData[j,i][0]<10 and imageData[j,i][1]>245 and imageData[j,i][2]<10):\n fileToWriteTo.write(\"!\")\n continue\n greyColor=(0.299*imageData[j,i][0]+0.587*imageData[j,i][1]+0.114*imageData[j,i][2])\n ASCIIgreyScale=\" .:-=+*#%@\"[::-1]\n fileToWriteTo.write(ASCIIgreyScale[int(greyColor//25.6)])\n fileToWriteTo.write(\"\\n\")\nfileToWriteTo.close()\nfileToWriteTo=open(\"ASCIItext.txt\", \"r\")\nfileList=fileToWriteTo.read().split(\"\\n\")\nprint(fileList)\n", "sub_path": "MSIwebsite/convertImageToASCII/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 837, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "PIL.Image.open", "line_number": 8, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 8, "usage_type": "name"}]} {"seq_id": "417131583", "text": "#!/usr/bin/env python3\n\nfrom pymedext_core import pymedext\nfrom intervaltree import Interval,IntervalTree\nimport argparse\nimport json\nfrom datetime import datetime\nimport psycopg2\nimport psycopg2.extras\nfrom typing import Iterator, Dict, Any, Optional\nimport io\nfrom os import listdir\nfrom os.path import isfile, join\nimport pandas\nimport logging\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(format='%(asctime)s -- %(name)s - %(levelname)s : %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\",\"--inputFolder\", help=\"input annotationsFiles\")\n\nargs = parser.parse_args()\n\nlogger.info(\"#####1\")\nnlp_workflow=\"nlp_workflow_V1\"\nfilterType = [\"sentence\"]\nmypath = args.inputFolder\nif not mypath.endswith(\"/\"):\n mypath = mypath+\"/\"\nlogger.info(\"###2\")\nallFiles = [mypath+f for f in listdir(mypath) if isfile(join(mypath, f))]\ndemoDoc=pymedext.Document(raw_text=\"load\",ID=\"patientID\", pathToconfig= allFiles)\ndemoDoc, __tree, __sentencepos, thisRoot =pymedext.normalize.uri(demoDoc)\ninfo_pat = [1234,1234,1,str(datetime.now().strftime(\"%Y-%m-%d\"))]\nlogger.info(info_pat)\ndict_note={\n \"person_id\":int(info_pat[0]), # NIP\n \"note_text\":demoDoc.annotations[0].value,\n \"visit_occurrence_id\":info_pat[1], # NDA\n \"note_id\":int(info_pat[2])#\"ni_doc\"\n }\nnote_id =int(info_pat[2])#\nthisTime = datetime.strptime(info_pat[-1], '%Y-%m-%d')\nnote_nlp_id = 1#\"nlp_id\"\nlogger.info(\"############\")\nannotations, dict_table_note, dict_table_person = pymedext.omop.buildNoteNlP(thisRoot, dict_note, note_id,note_nlp_id, nlp_workflow,thisTime, filterType,True)\nlogger.info(annotations)\nlogger.info(dict_table_note)\nlogger.info(dict_table_person)\n", "sub_path": "src/pymedext_to_omop.py", "file_name": "pymedext_to_omop.py", "file_ext": "py", "file_size_in_byte": 1724, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 17, "usage_type": "attribute"}, {"api_name": 
"argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "pymedext_core.pymedext.Document", "line_number": 33, "usage_type": "call"}, {"api_name": "pymedext_core.pymedext", "line_number": 33, "usage_type": "name"}, {"api_name": "pymedext_core.pymedext.normalize.uri", "line_number": 34, "usage_type": "call"}, {"api_name": "pymedext_core.pymedext.normalize", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pymedext_core.pymedext", "line_number": 34, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "name"}, {"api_name": "pymedext_core.pymedext.omop.buildNoteNlP", "line_number": 47, "usage_type": "call"}, {"api_name": "pymedext_core.pymedext.omop", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pymedext_core.pymedext", "line_number": 47, "usage_type": "name"}]} {"seq_id": "447258446", "text": "from __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nfrom runway import RunwayModel\nfrom detectron.core.config import assert_and_infer_cfg\nfrom detectron.core.config import cfg\nfrom detectron.core.config import merge_cfg_from_file\nfrom detectron.utils.io import cache_url\nimport detectron.core.test_engine as infer_engine\nimport detectron.datasets.dummy_datasets as dummy_datasets\nimport detectron.utils.c2 as c2_utils\nimport detectron.utils.vis as vis_utils\n\nc2_utils.import_detectron_ops()\n#cv2.ocl.setUseOpenCL(False)\n\ndetectron = RunwayModel()\n\n\ndef get_result_json(boxes, segms, keypoints, thresh=0.7, dataset=None):\n if isinstance(boxes, list):\n boxes, segms, keypoints, classes = convert_from_cls_format(\n boxes, segms, keypoints)\n if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:\n return\n dataset_keypoints, _ = keypoint_utils.get_keypoints()\n if segms is not None:\n masks = mask_util.decode(segms)\n # Display in largest to smallest order to reduce occlusion\n areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n sorted_inds = np.argsort(-areas)\n sorted_inds = np.argsort(-boxes[:,4])\n results = {'mask_rle':segms, 'objects':[]}\n for i in sorted_inds:\n score = boxes[i, -1]\n if score < thresh:\n continue\n bbox = boxes[i, :4]\n class_idx = classes[i]\n class_text = dataset.classes[class_idx]\n mask_idx = i\n mask = masks[:, :, mask_idx]\n #kps = keypoints[i]\n _, contour, hier = cv2.findContours(mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n contours = [ c.reshape((-1, 2)).tolist() for c in contour ]\n obj = {'box':bbox.tolist(), 'class':class_text, 'mask_idx':mask_idx, 'contours':contours, 'score':float(score)}\n results['objects'].append(obj)\n return results\n\n\n@detectron.setup\ndef setup():\n global dummy_coco_dataset\n cfg_file = 'configs/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_2x.yaml'\n weights = 'https://dl.fbaipublicfiles.com/detectron/35861858/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_2x.yaml.02_32_51.SgT4y1cO/output/train/coco_2014_train:coco_2014_valminusminival/generalized_rcnn/model_final.pkl'\n merge_cfg_from_file(cfg_file)\n cfg.NUM_GPUS 
= 1\n weights = cache_url(weights, cfg.DOWNLOAD_CACHE)\n assert_and_infer_cfg(cache_urls=False)\n assert not cfg.MODEL.RPN_ONLY, \\\n 'RPN models are not supported'\n assert not cfg.TEST.PRECOMPUTED_PROPOSALS, \\\n 'Models that require precomputed proposals are not supported'\n model = infer_engine.initialize_model_from_cfg(weights)\n dummy_coco_dataset = dummy_datasets.get_coco_dataset()\n return model\n\n\n@detectron.command('detect', inputs={'image': 'image'}, outputs={'output': 'image'})\ndef detect(model, inp):\n im = np.array(inp['image'])\n with c2_utils.NamedCudaScope(0):\n cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(model, im, None)#, timers=timers)\n #results = get_result_json(cls_boxes, cls_segms, cls_keyps, thresh=args.save_thresh, dataset=dummy_coco_dataset)\n im_new = vis_utils.vis_one_image_opencv(im[:, :, ::-1], cls_boxes, segms=cls_segms, keypoints=cls_keyps, thresh=0.9, kp_thresh=2, show_box=True, dataset=dummy_coco_dataset, show_class=True)\n out = np.array(im_new)\n output = np.clip(out, 0, 255).astype(np.uint8)\n return dict(output=output)\n\n\nif __name__ == '__main__':\n detectron.run()\n", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 3511, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "detectron.utils.c2.import_detectron_ops", "line_number": 13, "usage_type": "call"}, {"api_name": "detectron.utils.c2", "line_number": 13, "usage_type": "name"}, {"api_name": "detectron.core.config", "line_number": 16, "usage_type": "name"}, {"api_name": "runway.RunwayModel", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 31, "usage_type": "call"}, {"api_name": "detectron.core.config.merge_cfg_from_file", "line_number": 55, "usage_type": "call"}, {"api_name": "detectron.core.config.cfg.NUM_GPUS", "line_number": 56, "usage_type": "attribute"}, {"api_name": "detectron.core.config.cfg", "line_number": 56, "usage_type": "name"}, {"api_name": "detectron.utils.io.cache_url", "line_number": 57, "usage_type": "call"}, {"api_name": "detectron.core.config.cfg.DOWNLOAD_CACHE", "line_number": 57, "usage_type": "attribute"}, {"api_name": "detectron.core.config.cfg", "line_number": 57, "usage_type": "name"}, {"api_name": "detectron.core.config.assert_and_infer_cfg", "line_number": 58, "usage_type": "call"}, {"api_name": "detectron.core.config.cfg.MODEL", "line_number": 59, "usage_type": "attribute"}, {"api_name": "detectron.core.config.cfg", "line_number": 59, "usage_type": "name"}, {"api_name": "detectron.core.config.cfg.TEST", "line_number": 61, "usage_type": "attribute"}, {"api_name": "detectron.core.config.cfg", "line_number": 61, "usage_type": "name"}, {"api_name": "detectron.core.test_engine.initialize_model_from_cfg", "line_number": 63, "usage_type": "call"}, {"api_name": "detectron.core.test_engine", "line_number": 63, "usage_type": "name"}, {"api_name": "detectron.datasets.dummy_datasets.get_coco_dataset", "line_number": 64, "usage_type": "call"}, {"api_name": "detectron.datasets.dummy_datasets", "line_number": 64, "usage_type": "name"}, {"api_name": "detectron.core.config.setup", "line_number": 50, "usage_type": "attribute"}, {"api_name": "detectron.core.config", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "detectron.utils.c2.NamedCudaScope", "line_number": 71, 
"usage_type": "call"}, {"api_name": "detectron.utils.c2", "line_number": 71, "usage_type": "name"}, {"api_name": "detectron.core.test_engine.im_detect_all", "line_number": 72, "usage_type": "call"}, {"api_name": "detectron.core.test_engine", "line_number": 72, "usage_type": "name"}, {"api_name": "detectron.utils.vis.vis_one_image_opencv", "line_number": 74, "usage_type": "call"}, {"api_name": "detectron.utils.vis", "line_number": 74, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 76, "usage_type": "attribute"}, {"api_name": "detectron.core.config.command", "line_number": 68, "usage_type": "call"}, {"api_name": "detectron.core.config", "line_number": 68, "usage_type": "name"}, {"api_name": "detectron.core.config.run", "line_number": 81, "usage_type": "call"}, {"api_name": "detectron.core.config", "line_number": 81, "usage_type": "name"}]} {"seq_id": "210934796", "text": "from web import form\r\nimport json, hashlib, zipfile, os, glob, time\r\nimport web, utils, decompiler\r\n\r\n#web.config.debug = False\r\nrender = web.template.render('templates/')\r\nrender_base = web.template.render('templates/', base='base')\r\n\r\nurls = ( '/', 'Index',\r\n\t'/about', 'Index',\r\n\t'/copyright', 'Index',\r\n\t'/login', 'Login',\r\n\t'/upload', 'Uploader',\r\n\t'/decompile/([a-fA-F0-9]{32})/?', 'Decompiler',\r\n\t\r\n\t#'/busca/()(q=([a-zA-Z0-9]*))?', 'Buscador', #EL BUSCADOR SE HACE EN JS\r\n\t#'/report/([a-fA-F0-9]{32})/busca/(q=([a-zA-Z0-9]*))?', 'Buscador',\r\n\t'/report/([a-fA-F0-9]{32})/info/classes.json', 'JsonAllClasses',\r\n\t'/report/([a-fA-F0-9]{32})', 'RedirReport',\r\n\t'/report/([a-fA-F0-9]{32})/', 'Report',\r\n\t'/report/([a-fA-F0-9]{32})/(0x.[0-9a-fA-F]+|@.*/.*)', 'JsonRecursos',\r\n\t'/report/([a-fA-F0-9]{32})/res/(@.*/.*)', 'RecursosView',#TODO\r\n\t'/report/([a-fA-F0-9]{32})/(([a-zA-Z_][a-zA-Z_0-9$]*\\.?)*)', 'View',\r\n\r\n\t)\r\napp = web.application(urls, globals())\r\n\r\nsesion = web.session.Session(app, web.session.DiskStore('sessions'))\r\n\r\n########## LOGIN ZONE ##########\r\ndef check_sesion():\r\n\tif not web.config.debug and not sesion.get('logged_in', False):\r\n\t\traise web.seeother('/login')\r\n\r\nclass Login:\r\n\tlogin_form = form.Form(\r\n\t\tform.Textbox('username'),\r\n\t\tform.Password('password'),\r\n\t\tform.Button('Login'),\r\n\t\t)\r\n\r\n\tdef valid_user(self):\r\n\t\ti = web.input()\r\n\t\treturn i.get('username') == i.get('password')\r\n\r\n\tdef GET(self, out=None):\r\n\t\tif web.input().get('out') is not None:\r\n\t\t\tsesion.logged_in = False\r\n\t\t\traise web.seeother('/')\r\n\t\treturn render_base.login(self.login_form())\r\n\r\n\tdef POST(self):\r\n\t\tlog = self.login_form()\r\n\t\tif log.validates() and self.valid_user():\r\n\t\t\tsesion.logged_in = True\r\n\t\t\traise web.seeother('/')\r\n\t\tlog.valid = None\r\n\t\treturn render_base.login(log)\r\n\r\n########## FUNCTION ZONE ##########\r\ndef checkCache(md5):\r\n\tif web.config.debug: return\r\n\tlast_mod = utils.getLastModifed(utils.REPORTS_DIR + md5)\r\n\tif 'HTTP_IF_MODIFIED_SINCE' in web.ctx.env and web.ctx.env['HTTP_IF_MODIFIED_SINCE'] == last_mod:\r\n\t\traise web.notmodified()\r\n\tetag = utils.getEtag(last_mod)\r\n\tif 'HTTP_IF_NONE_MATCH' in web.ctx.env and web.ctx.env['HTTP_IF_NONE_MATCH'] == etag:\r\n\t\traise web.notmodified()\r\n\tweb.header('Last-Modified', last_mod)\r\n\tweb.header('Etag', etag)\r\n\r\nclass 
Index:\r\n\tdef GET(self):\r\n\t\tcheck_sesion()\r\n\t\treturn render_base.index()\r\n\r\nclass Decompiler:\r\n\tdef GET(self, md5):\r\n\t\tcheck_sesion()\r\n\t\tfiles = glob.glob(utils.FILE_UPLOAD_DIR + md5 + '.*')\r\n\t\tif files:\r\n\t\t\tdec = decompiler.Decompiler()\r\n\t\t\tdec.decompile(files[0])\r\n\t\t\traise web.seeother('/report/'+md5+'/')\r\n\t\telse:\r\n\t\t\treturn render_base.upload(\"El fichero no se encuentra en el sistema\")\r\n\r\nclass RedirReport:\r\n\tdef GET(self, md5):\r\n\t\traise web.seeother('/report/'+md5+'/')\r\nclass Report:\r\n\tdef GET(self, md5):\r\n\t\tcheck_sesion()\r\n\t\tcheckCache(md5)\r\n\t\tif os.path.isdir(utils.REPORTS_DIR + md5):\r\n\t\t\t#TODO ver que pasa cuando hay un dex y un apk con el mismo md5\r\n\t\t\tsample = []\r\n\t\t\tif os.path.isfile(utils.REPORTS_DIR + md5 + \"/sample.apk\"):\r\n\t\t\t\tsample += ['Descargar muestra']\r\n\t\t\tif os.path.isfile(utils.REPORTS_DIR + md5 + \"/classes.dex\"):\r\n\t\t\t\tsample += ['Descargar dex']\r\n\t\t\tif os.path.isfile(utils.REPORTS_DIR + md5 + \"/classes.dex.dex2jar.jar\"):\r\n\t\t\t\tsample += ['Descargar jar']\r\n\t\t\t#TODO insertar enlace si es un dex y tengo el apk\r\n\r\n\t\t\tmanifest = \"\"\r\n\t\t\tif os.path.isfile(utils.REPORTS_DIR + md5 +'/smali/AndroidManifest.xml'):\r\n\t\t\t\twith open(utils.REPORTS_DIR + md5 +'/smali/AndroidManifest.xml', 'r') as fp:\r\n\t\t\t\t\tmanifest = fp.read()\r\n\t\t\t\t\tfp.close()\r\n\t\t\trecursos = utils.findRecursos(utils.REPORTS_DIR + md5 +'/smali/res/')\r\n\t\t\treturn render_base.report(md5, sample, manifest, recursos)\r\n\t\telif glob.glob(utils.FILE_UPLOAD_DIR + md5 + '.*'):\r\n\t\t\traise web.seeother('/decompile/'+md5)\r\n\t\telse:\r\n\t\t\treturn render_base.upload(\"El fichero no se encuentra en el sistema\")\r\n\r\nclass JsonRecursos():\r\n\tdef GET(self, md5, key):\r\n\t\tcheck_sesion()\r\n\t\tcheckCache(md5)\r\n\t\tweb.header('Content-Type', 'application/json')\r\n\t\treturn utils.getRecurso(md5, key)\r\n\r\nclass JsonAllClasses():\r\n\tdef GET(self, md5):\r\n\t\tcheck_sesion()\r\n\t\tcheckCache(md5)\r\n\t\tret = []\r\n\t\tclases = utils.findPackages(utils.REPORTS_DIR + md5 +'/ddx/')\r\n\t\tfor pack in clases:\r\n\t\t\tret += [{\"pack\":pack,\"clases\":clases[pack]}]\r\n\t\tweb.header('Content-Type', 'application/json')\r\n\t\treturn json.dumps(ret)\r\n\r\nclass RecursosView():\r\n\tdef GET(self, md5, key):\r\n\t\tcheck_sesion()\r\n\t\tcheckCache(md5)\r\n\t\tabsolute_path = utils.getPathFromRecurso(md5, key);\r\n\t\tif absolute_path:\r\n\t\t\tif absolute_path.endswith(\".xml\"):\r\n\t\t\t\tfile = \"\"\r\n\t\t\t\twith open(absolute_path, 'r') as fp:\r\n\t\t\t\t\tfile = fp.read()\r\n\t\t\t\t\tfp.close()\r\n\t\t\t\treturn render_base.view(md5, file, \"xml:nocontrols\",True)\r\n\t\t\traise web.seeother(absolute_path[1:])\r\n\t\treturn web.notfound()\r\n\r\nclass View():\r\n\tdef GET(self, md5, clase, class_name):\r\n\t\tcheck_sesion()\r\n\t\tcheckCache(md5)\r\n\t\ttype = None\r\n\t\tfile = clase.replace(\".\",\"/\")\r\n\t\t\r\n\t\tif os.path.isfile(utils.REPORTS_DIR+md5+\"/smali/smali/\"+file+\".smali\"):\r\n\t\t\ttype = \"smali:nocontrols\"\r\n\t\t\tfile = utils.REPORTS_DIR+md5+\"/smali/smali/\"+file+\".smali\"\r\n\t\telif os.path.isfile(utils.REPORTS_DIR+md5+\"/ddx/\"+file+\".ddx\"):\r\n\t\t\ttype = \"ddx:nocontrols\"\r\n\t\t\tfile = utils.REPORTS_DIR+md5+\"/ddx/\"+file+\".ddx\"\r\n\t\telse:\r\n\t\t\treturn web.notfound()\r\n\t\t\r\n\t\twith open(file, 'r') as fp:\r\n\t\t\tfile = None\r\n\t\t\tfile = 
fp.read()\r\n\t\t\tfp.close()\r\n\t\treturn render_base.view(md5, file, type, False)\r\n\r\nclass Uploader:\r\n\tdef GET(self):\r\n\t\tcheck_sesion()\r\n\t\treturn render_base.upload(None)\r\n\r\n\tdef POST(self):\r\n\t\tcheck_sesion()\r\n\t\tficheros = web.input(android_file={})\r\n\t\terror = \"Solo se soportan ficheros APK y DEX\"\r\n\r\n\t\tresumen = hashlib.md5()\r\n\t\tresumen.update(ficheros['android_file'].value)\r\n\t\tmd5_file = resumen.hexdigest()\r\n\t\t#TODO ya tengo el fichero?\r\n\r\n\t\tmagic = ficheros['android_file'].file.read(3)\r\n\t\tfile = None\r\n\t\tif magic[:2] == \"PK\":\r\n\t\t\tfile = utils.FILE_UPLOAD_DIR + md5_file + \".apk\"\r\n\t\telif magic == \"dex\":\r\n\t\t\tfile = utils.FILE_UPLOAD_DIR + md5_file + \".dex\"\r\n\r\n\t\t#TODO ya tengo el fichero?\r\n\t\tif file:\r\n\t\t\twith open(file, 'wb') as fp:\r\n\t\t\t\tfp.write(magic)\r\n\t\t\t\tfp.write(ficheros['android_file'].file.read())\r\n\t\t\t\tfp.close()\r\n\t\t\t\tif file.endswith(\".apk\"):\r\n\t\t\t\t\terror = \"No es un fichero APK valido\"\r\n\t\t\t\t\tif zipfile.is_zipfile(file):\r\n\t\t\t\t\t\tzip = zipfile.ZipFile(file, 'r')\r\n\t\t\t\t\t\terror_file = zip.testzip()\r\n\t\t\t\t\t\tif error_file is not None:\r\n\t\t\t\t\t\t\terror = \"Existen fichero erroneos en el APK (\"+error_file+\")\"\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\telementos = zip.namelist()\r\n\t\t\t\t\t\t\tif \"classes.dex\" in elementos and \"AndroidManifest.xml\" in elementos:\r\n\t\t\t\t\t\t\t\terror = None\r\n\t\t\t\t\t\tzip.close()\r\n\t\t\t\telse:\r\n\t\t\t\t\terror = None\r\n\t\t\t\tif error:\r\n\t\t\t\t\tos.remove(file)\r\n\t\tif error:\r\n\t\t\treturn render_base.upload(error)\r\n\t\traise web.seeother('/report/'+md5_file)\r\n\r\n\r\n\r\n\r\n\r\ndef test():\r\n\tapp.run()\r\n\r\nif __name__ == \"__main__\":\r\n\tapp.run()\r\n", "sub_path": "Android Web Inspector/androidInspector.py", "file_name": "androidInspector.py", "file_ext": "py", "file_size_in_byte": 7050, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "84", "api": [{"api_name": "web.template.render", "line_number": 6, "usage_type": "call"}, {"api_name": "web.template", "line_number": 6, "usage_type": "attribute"}, {"api_name": "web.template.render", "line_number": 7, "usage_type": "call"}, {"api_name": "web.template", "line_number": 7, "usage_type": "attribute"}, {"api_name": "web.application", "line_number": 26, "usage_type": "call"}, {"api_name": "web.session.Session", "line_number": 28, "usage_type": "call"}, {"api_name": "web.session", "line_number": 28, "usage_type": "attribute"}, {"api_name": "web.session.DiskStore", "line_number": 28, "usage_type": "call"}, {"api_name": "web.config", "line_number": 32, "usage_type": "attribute"}, {"api_name": "web.seeother", "line_number": 33, "usage_type": "call"}, {"api_name": "web.form.Form", "line_number": 36, "usage_type": "call"}, {"api_name": "web.form", "line_number": 36, "usage_type": "name"}, {"api_name": "web.form.Textbox", "line_number": 37, "usage_type": "call"}, {"api_name": "web.form", "line_number": 37, "usage_type": "name"}, {"api_name": "web.form.Password", "line_number": 38, "usage_type": "call"}, {"api_name": "web.form", "line_number": 38, "usage_type": "name"}, {"api_name": "web.form.Button", "line_number": 39, "usage_type": "call"}, {"api_name": "web.form", "line_number": 39, "usage_type": "name"}, {"api_name": "web.input", "line_number": 43, "usage_type": "call"}, {"api_name": "web.input", "line_number": 47, "usage_type": "call"}, {"api_name": 
"web.seeother", "line_number": 49, "usage_type": "call"}, {"api_name": "web.seeother", "line_number": 56, "usage_type": "call"}, {"api_name": "web.config", "line_number": 62, "usage_type": "attribute"}, {"api_name": "utils.getLastModifed", "line_number": 63, "usage_type": "call"}, {"api_name": "utils.REPORTS_DIR", "line_number": 63, "usage_type": "attribute"}, {"api_name": "web.ctx", "line_number": 64, "usage_type": "attribute"}, {"api_name": "web.notmodified", "line_number": 65, "usage_type": "call"}, {"api_name": "utils.getEtag", "line_number": 66, "usage_type": "call"}, {"api_name": "web.ctx", "line_number": 67, "usage_type": "attribute"}, {"api_name": "web.notmodified", "line_number": 68, "usage_type": "call"}, {"api_name": "web.header", "line_number": 69, "usage_type": "call"}, {"api_name": "web.header", "line_number": 70, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 80, "usage_type": "call"}, {"api_name": "utils.FILE_UPLOAD_DIR", "line_number": 80, "usage_type": "attribute"}, {"api_name": "decompiler.Decompiler", "line_number": 82, "usage_type": "call"}, {"api_name": "web.seeother", "line_number": 84, "usage_type": "call"}, {"api_name": "web.seeother", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "utils.REPORTS_DIR", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "utils.REPORTS_DIR", "line_number": 98, "usage_type": "attribute"}, {"api_name": "utils.REPORTS_DIR", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "utils.REPORTS_DIR", "line_number": 100, "usage_type": "attribute"}, {"api_name": "utils.REPORTS_DIR", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "utils.REPORTS_DIR", "line_number": 102, "usage_type": "attribute"}, {"api_name": "utils.REPORTS_DIR", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "utils.REPORTS_DIR", "line_number": 107, "usage_type": "attribute"}, {"api_name": "utils.REPORTS_DIR", "line_number": 108, "usage_type": "attribute"}, {"api_name": "utils.findRecursos", "line_number": 111, "usage_type": "call"}, {"api_name": "utils.REPORTS_DIR", "line_number": 111, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 113, "usage_type": "call"}, {"api_name": "utils.FILE_UPLOAD_DIR", "line_number": 113, "usage_type": "attribute"}, {"api_name": "web.seeother", "line_number": 114, "usage_type": "call"}, {"api_name": "web.header", "line_number": 122, "usage_type": "call"}, {"api_name": "utils.getRecurso", "line_number": 123, "usage_type": "call"}, {"api_name": "utils.findPackages", "line_number": 130, "usage_type": "call"}, {"api_name": "utils.REPORTS_DIR", "line_number": 130, "usage_type": "attribute"}, {"api_name": "web.header", "line_number": 133, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 134, "usage_type": "call"}, {"api_name": 
"utils.getPathFromRecurso", "line_number": 140, "usage_type": "call"}, {"api_name": "web.seeother", "line_number": 148, "usage_type": "call"}, {"api_name": "web.notfound", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "utils.REPORTS_DIR", "line_number": 158, "usage_type": "attribute"}, {"api_name": "utils.REPORTS_DIR", "line_number": 160, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "utils.REPORTS_DIR", "line_number": 161, "usage_type": "attribute"}, {"api_name": "utils.REPORTS_DIR", "line_number": 163, "usage_type": "attribute"}, {"api_name": "web.notfound", "line_number": 165, "usage_type": "call"}, {"api_name": "web.input", "line_number": 180, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 183, "usage_type": "call"}, {"api_name": "utils.FILE_UPLOAD_DIR", "line_number": 191, "usage_type": "attribute"}, {"api_name": "utils.FILE_UPLOAD_DIR", "line_number": 193, "usage_type": "attribute"}, {"api_name": "zipfile.is_zipfile", "line_number": 203, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 204, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 216, "usage_type": "call"}, {"api_name": "web.seeother", "line_number": 219, "usage_type": "call"}]} {"seq_id": "43217328", "text": "\"\"\"\nFunctions to unpack Simrad EK60 .raw files\nModification from original source (mentioned below) included:\n- python 3.6 compatibility\n- strip off dependency on other mi-instrument functions\n- unpack split-beam angle data\n- unpack various additional variables\n- support saving to netCDF file\n\nOriginal source for unpacking power data part:\noceanobservatories/mi-instrument @https://github.com/oceanobservatories/mi-instrument\nAuthors: Ronald Ronquillo & Richard Han\n\n\"\"\"\n\n\nfrom collections import defaultdict\nfrom struct import unpack_from, unpack\nimport numpy as np\nimport re\nimport os\nfrom datetime import datetime as dt\nfrom matplotlib.dates import date2num\nimport pytz\nimport echopype as ep\n\n\n# Set constants for unpacking .raw files\nBLOCK_SIZE = 1024*4 # Block size read in from binary file to search for token\nLENGTH_SIZE = 4\nDATAGRAM_HEADER_SIZE = 12\nCONFIG_HEADER_SIZE = 516\nCONFIG_TRANSDUCER_SIZE = 320\n\n# set global regex expressions to find all sample, annotation and NMEA sentences\nSAMPLE_REGEX = b'RAW\\d{1}'\nSAMPLE_MATCHER = re.compile(SAMPLE_REGEX, re.DOTALL)\nFILENAME_REGEX = r'(?P\\S*)-D(?P\\d{1,})-T(?P